In [ ]:
2+2
Out[ ]:
4
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for SBUX
ticker = 'SBUX'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('SBUX Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critial Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean        0.028028
std         1.685190
min       -13.700859
25%        -0.802698
50%         0.009609
75%         0.821873
max        18.540703
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.8284551880647155
p-value: 0.36642877034172394
Critial Values:
   1%, -3.4353708501743654
Critial Values:
   5%, -2.8637572934525286
Critial Values:
   10%, -2.56795049999266
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for NKE
ticker = 'NKE'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('NKE Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean       -0.006077
std         2.255578
min       -18.641289
25%        -1.097137
50%         0.019218
75%         1.141554
max        19.857124
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.6018055323406784
p-value: 0.48268642930999606
Critical Values:
   1%, -3.435336393256612
Critical Values:
   5%, -2.863742089354917
Critical Values:
   10%, -2.5679424031562683
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for PFE
ticker = 'PFE'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('PFE Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean       -0.001806
std         0.580542
min        -2.396431
25%        -0.292612
50%        -0.027138
75%         0.271390
max         4.088535
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.4112569276512057
p-value: 0.5768676662505499
Critical Values:
   1%, -3.4354254066484664
Critical Values:
   5%, -2.863781366113082
Critical Values:
   10%, -2.56796331977745
In [ ]:
import nbformat
from nbconvert import HTMLExporter
import argparse

def convert_ipynb_to_html(ipynb_file, html_file):
    # Load the notebook
    with open(ipynb_file, 'r', encoding='utf-8') as f:
        notebook_content = nbformat.read(f, as_version=4)

    # Convert the notebook to HTML
    html_exporter = HTMLExporter()
    (body, resources) = html_exporter.from_notebook_node(notebook_content)

    # Write the HTML to a file
    with open(html_file, 'w', encoding='utf-8') as f:
        f.write(body)
    print(f"Converted {ipynb_file} to {html_file}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert Jupyter Notebook to HTML')
    parser.add_argument('ipynb_file', type=str, help='Path to the input .ipynb file')
    parser.add_argument('html_file', type=str, help='Path to the output .html file')
    args = parser.parse_args()

    convert_ipynb_to_html(args.ipynb_file, args.html_file)
usage: colab_kernel_launcher.py [-h] ipynb_file html_file
colab_kernel_launcher.py: error: the following arguments are required: html_file
An exception has occurred, use %tb to see the full traceback.

SystemExit: 2
/usr/local/lib/python3.11/dist-packages/IPython/core/interactiveshell.py:3561: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.
  warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
In [ ]:
import nbformat
from nbconvert import HTMLExporter
# import argparse # No need for argparse in this case

def convert_ipynb_to_html(ipynb_file, html_file):
    # Load the notebook
    with open(ipynb_file, 'r', encoding='utf-8') as f:
        notebook_content = nbformat.read(f, as_version=4)

    # Convert the notebook to HTML
    html_exporter = HTMLExporter()
    (body, resources) = html_exporter.from_notebook_node(notebook_content)

    # Write the HTML to a file
    with open(html_file, 'w', encoding='utf-8') as f:
        f.write(body)
    print(f"Converted {ipynb_file} to {html_file}")

# Example usage within a Jupyter Notebook:
# Replace with your actual file paths
ipynb_file_path = 'your_notebook.ipynb'
html_file_path = 'output.html'

convert_ipynb_to_html(ipynb_file_path, html_file_path)
---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
<ipython-input-40-ae2276220ceb> in <cell line: 0>()
     22 html_file_path = 'output.html'
     23 
---> 24 convert_ipynb_to_html(ipynb_file_path, html_file_path)

<ipython-input-40-ae2276220ceb> in convert_ipynb_to_html(ipynb_file, html_file)
      5 def convert_ipynb_to_html(ipynb_file, html_file):
      6     # Load the notebook
----> 7     with open(ipynb_file, 'r', encoding='utf-8') as f:
      8         notebook_content = nbformat.read(f, as_version=4)
      9 

FileNotFoundError: [Errno 2] No such file or directory: 'your_notebook.ipynb'
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for SBUX
ticker = 'SBUX'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('SBUX Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critial Values:')
    print(f'   {key}, {value}')
YF.download() has changed argument auto_adjust default to True
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean        0.028028
std         1.685190
min       -13.700859
25%        -0.802698
50%         0.009609
75%         0.821873
max        18.540703
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.8284551880647155
p-value: 0.36642877034172394
Critial Values:
   1%, -3.4353708501743654
Critial Values:
   5%, -2.8637572934525286
Critial Values:
   10%, -2.56795049999266
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for NKE
ticker = 'NKE'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('NKE Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean       -0.006077
std         2.255578
min       -18.641289
25%        -1.097137
50%         0.019218
75%         1.141554
max        19.857124
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.6018055323406784
p-value: 0.48268642930999606
Critical Values:
   1%, -3.435336393256612
Critical Values:
   5%, -2.863742089354917
Critical Values:
   10%, -2.5679424031562683
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for STARL-USD
ticker = 'STARL-USD'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('STARL-USD Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1325.000000
mean        0.000000
std         0.000002
min        -0.000015
25%         0.000000
50%         0.000000
75%         0.000000
max         0.000022
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -2.6146923071676897
p-value: 0.08999852761165716
Critical Values:
   1%, -3.435378572037035
Critical Values:
   5%, -2.863760700696655
Critical Values:
   10%, -2.56795231450063
In [ ]:
#@title Convert ipynb to HTML in Colab
# Upload ipynb
from google.colab import files
f = files.upload()

# Convert ipynb to html
import subprocess
file0 = list(f.keys())[0]
_ = subprocess.run(["pip", "install", "nbconvert"])
_ = subprocess.run(["jupyter", "nbconvert", file0, "--to", "html"])

# download the html
files.download(file0[:-5]+"html")
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
In [ ]:
%%shell
jupyter nbconvert --to html /content/drive/MyDrive/6to_semestre_feb-jun_2025/series_de_tiempo/act_2_2.ipynb
[NbConvertApp] WARNING | pattern '/content/drive/MyDrive/6to_semestre_feb-jun_2025/series_de_tiempo/act_2_2.ipynb' matched no files
This application is used to convert notebook files (*.ipynb)
        to various other formats.

        WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.

Options
=======
The options below are convenience aliases to configurable class-options,
as listed in the "Equivalent to" description-line of the aliases.
To see all configurable class-options for some <cmd>, use:
    <cmd> --help-all

--debug
    set log level to logging.DEBUG (maximize logging output)
    Equivalent to: [--Application.log_level=10]
--show-config
    Show the application's configuration (human-readable format)
    Equivalent to: [--Application.show_config=True]
--show-config-json
    Show the application's configuration (json format)
    Equivalent to: [--Application.show_config_json=True]
--generate-config
    generate default config file
    Equivalent to: [--JupyterApp.generate_config=True]
-y
    Answer yes to any questions instead of prompting.
    Equivalent to: [--JupyterApp.answer_yes=True]
--execute
    Execute the notebook prior to export.
    Equivalent to: [--ExecutePreprocessor.enabled=True]
--allow-errors
    Continue notebook execution even if one of the cells throws an error and include the error message in the cell output (the default behaviour is to abort conversion). This flag is only relevant if '--execute' was specified, too.
    Equivalent to: [--ExecutePreprocessor.allow_errors=True]
--stdin
    read a single notebook file from stdin. Write the resulting notebook with default basename 'notebook.*'
    Equivalent to: [--NbConvertApp.from_stdin=True]
--stdout
    Write notebook output to stdout instead of files.
    Equivalent to: [--NbConvertApp.writer_class=StdoutWriter]
--inplace
    Run nbconvert in place, overwriting the existing notebook (only
            relevant when converting to notebook format)
    Equivalent to: [--NbConvertApp.use_output_suffix=False --NbConvertApp.export_format=notebook --FilesWriter.build_directory=]
--clear-output
    Clear output of current file and save in place,
            overwriting the existing notebook.
    Equivalent to: [--NbConvertApp.use_output_suffix=False --NbConvertApp.export_format=notebook --FilesWriter.build_directory= --ClearOutputPreprocessor.enabled=True]
--coalesce-streams
    Coalesce consecutive stdout and stderr outputs into one stream (within each cell).
    Equivalent to: [--NbConvertApp.use_output_suffix=False --NbConvertApp.export_format=notebook --FilesWriter.build_directory= --CoalesceStreamsPreprocessor.enabled=True]
--no-prompt
    Exclude input and output prompts from converted document.
    Equivalent to: [--TemplateExporter.exclude_input_prompt=True --TemplateExporter.exclude_output_prompt=True]
--no-input
    Exclude input cells and output prompts from converted document.
            This mode is ideal for generating code-free reports.
    Equivalent to: [--TemplateExporter.exclude_output_prompt=True --TemplateExporter.exclude_input=True --TemplateExporter.exclude_input_prompt=True]
--allow-chromium-download
    Whether to allow downloading chromium if no suitable version is found on the system.
    Equivalent to: [--WebPDFExporter.allow_chromium_download=True]
--disable-chromium-sandbox
    Disable chromium security sandbox when converting to PDF..
    Equivalent to: [--WebPDFExporter.disable_sandbox=True]
--show-input
    Shows code input. This flag is only useful for dejavu users.
    Equivalent to: [--TemplateExporter.exclude_input=False]
--embed-images
    Embed the images as base64 dataurls in the output. This flag is only useful for the HTML/WebPDF/Slides exports.
    Equivalent to: [--HTMLExporter.embed_images=True]
--sanitize-html
    Whether the HTML in Markdown cells and cell outputs should be sanitized..
    Equivalent to: [--HTMLExporter.sanitize_html=True]
--log-level=<Enum>
    Set the log level by value or name.
    Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
    Default: 30
    Equivalent to: [--Application.log_level]
--config=<Unicode>
    Full path of a config file.
    Default: ''
    Equivalent to: [--JupyterApp.config_file]
--to=<Unicode>
    The export format to be used, either one of the built-in formats
            ['asciidoc', 'custom', 'html', 'latex', 'markdown', 'notebook', 'pdf', 'python', 'qtpdf', 'qtpng', 'rst', 'script', 'slides', 'webpdf']
            or a dotted object name that represents the import path for an
            ``Exporter`` class
    Default: ''
    Equivalent to: [--NbConvertApp.export_format]
--template=<Unicode>
    Name of the template to use
    Default: ''
    Equivalent to: [--TemplateExporter.template_name]
--template-file=<Unicode>
    Name of the template file to use
    Default: None
    Equivalent to: [--TemplateExporter.template_file]
--theme=<Unicode>
    Template specific theme(e.g. the name of a JupyterLab CSS theme distributed
    as prebuilt extension for the lab template)
    Default: 'light'
    Equivalent to: [--HTMLExporter.theme]
--sanitize_html=<Bool>
    Whether the HTML in Markdown cells and cell outputs should be sanitized.This
    should be set to True by nbviewer or similar tools.
    Default: False
    Equivalent to: [--HTMLExporter.sanitize_html]
--writer=<DottedObjectName>
    Writer class used to write the
                                        results of the conversion
    Default: 'FilesWriter'
    Equivalent to: [--NbConvertApp.writer_class]
--post=<DottedOrNone>
    PostProcessor class used to write the
                                        results of the conversion
    Default: ''
    Equivalent to: [--NbConvertApp.postprocessor_class]
--output=<Unicode>
    Overwrite base name use for output files.
                Supports pattern replacements '{notebook_name}'.
    Default: '{notebook_name}'
    Equivalent to: [--NbConvertApp.output_base]
--output-dir=<Unicode>
    Directory to write output(s) to. Defaults
                                  to output to the directory of each notebook. To recover
                                  previous default behaviour (outputting to the current
                                  working directory) use . as the flag value.
    Default: ''
    Equivalent to: [--FilesWriter.build_directory]
--reveal-prefix=<Unicode>
    The URL prefix for reveal.js (version 3.x).
            This defaults to the reveal CDN, but can be any url pointing to a copy
            of reveal.js.
            For speaker notes to work, this must be a relative path to a local
            copy of reveal.js: e.g., "reveal.js".
            If a relative path is given, it must be a subdirectory of the
            current directory (from which the server is run).
            See the usage documentation
            (https://nbconvert.readthedocs.io/en/latest/usage.html#reveal-js-html-slideshow)
            for more details.
    Default: ''
    Equivalent to: [--SlidesExporter.reveal_url_prefix]
--nbformat=<Enum>
    The nbformat version to write.
            Use this to downgrade notebooks.
    Choices: any of [1, 2, 3, 4]
    Default: 4
    Equivalent to: [--NotebookExporter.nbformat_version]

Examples
--------

    The simplest way to use nbconvert is

            > jupyter nbconvert mynotebook.ipynb --to html

            Options include ['asciidoc', 'custom', 'html', 'latex', 'markdown', 'notebook', 'pdf', 'python', 'qtpdf', 'qtpng', 'rst', 'script', 'slides', 'webpdf'].

            > jupyter nbconvert --to latex mynotebook.ipynb

            Both HTML and LaTeX support multiple output templates. LaTeX includes
            'base', 'article' and 'report'.  HTML includes 'basic', 'lab' and
            'classic'. You can specify the flavor of the format used.

            > jupyter nbconvert --to html --template lab mynotebook.ipynb

            You can also pipe the output to stdout, rather than a file

            > jupyter nbconvert mynotebook.ipynb --stdout

            PDF is generated via latex

            > jupyter nbconvert mynotebook.ipynb --to pdf

            You can get (and serve) a Reveal.js-powered slideshow

            > jupyter nbconvert myslides.ipynb --to slides --post serve

            Multiple notebooks can be given at the command line in a couple of
            different ways:

            > jupyter nbconvert notebook*.ipynb
            > jupyter nbconvert notebook1.ipynb notebook2.ipynb

            or you can specify the notebooks list in a config file, containing::

                c.NbConvertApp.notebooks = ["my_notebook.ipynb"]

            > jupyter nbconvert --config mycfg.py

To see all available configurables, use `--help-all`.

---------------------------------------------------------------------------
CalledProcessError                        Traceback (most recent call last)
<ipython-input-4-5f2f637c64da> in <cell line: 0>()
----> 1 get_ipython().run_cell_magic('shell', '', 'jupyter nbconvert --to html /content/drive/MyDrive/6to_semestre_feb-jun_2025/series_de_tiempo/act_2_2.ipynb\n')

/usr/local/lib/python3.11/dist-packages/google/colab/_shell.py in run_cell_magic(self, magic_name, line, cell)
    356     if line and not cell:
    357       cell = ' '
--> 358     return super().run_cell_magic(magic_name, line, cell)
    359 
    360 

/usr/local/lib/python3.11/dist-packages/IPython/core/interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
   2471             with self.builtin_trap:
   2472                 args = (magic_arg_s, cell)
-> 2473                 result = fn(*args, **kwargs)
   2474             return result
   2475 

/usr/local/lib/python3.11/dist-packages/google/colab/_system_commands.py in _shell_cell_magic(args, cmd)
    110   result = _run_command(cmd, clear_streamed_output=False)
    111   if not parsed_args.ignore_errors:
--> 112     result.check_returncode()
    113   return result
    114 

/usr/local/lib/python3.11/dist-packages/google/colab/_system_commands.py in check_returncode(self)
    135   def check_returncode(self):
    136     if self.returncode:
--> 137       raise subprocess.CalledProcessError(
    138           returncode=self.returncode, cmd=self.args, output=self.output
    139       )

CalledProcessError: Command 'jupyter nbconvert --to html /content/drive/MyDrive/6to_semestre_feb-jun_2025/series_de_tiempo/act_2_2.ipynb
' returned non-zero exit status 255.
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for SBUX
ticker = 'SBUX'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('SBUX Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critial Values:')
    print(f'   {key}, {value}')
YF.download() has changed argument auto_adjust default to True
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean        0.028028
std         1.685190
min       -13.700859
25%        -0.802698
50%         0.009609
75%         0.821873
max        18.540703
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.8284551880647155
p-value: 0.36642877034172394
Critial Values:
   1%, -3.4353708501743654
Critial Values:
   5%, -2.8637572934525286
Critial Values:
   10%, -2.56795049999266
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for NKE
ticker = 'NKE'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
# A simple random walk test can be performed by checking if the first difference of the series is white noise.
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('NKE Stock Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1314.000000
mean       -0.006077
std         2.255578
min       -18.641289
25%        -1.097137
50%         0.019218
75%         1.141554
max        19.857124
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -1.6018055323406784
p-value: 0.48268642930999606
Critical Values:
   1%, -3.435336393256612
Critical Values:
   5%, -2.863742089354917
Critical Values:
   10%, -2.5679424031562683
In [ ]:
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller

# Fetch historical data for STARL-USD
ticker = 'STARL-USD'
start_date = '2019-11-29'
end_date = '2025-02-25'
data = yf.download(ticker, start=start_date, end=end_date)

# Calculate moving averages
data['MA9'] = data['Close'].rolling(window=9).mean()
data['MA30'] = data['Close'].rolling(window=30).mean()

# Perform random walk test
data['Returns'] = data['Close'].pct_change().dropna()
data['Diff'] = data['Close'].diff().dropna()

# Perform unit root test (Augmented Dickey-Fuller test)
adf_test = adfuller(data['Close'].dropna())

# Technical analysis plot
plt.figure(figsize=(14, 7))
plt.plot(data['Close'], label='Close Price')
plt.plot(data['MA9'], label='9-Day MA')
plt.plot(data['MA30'], label='30-Day MA')
plt.title('STARL-USD Price with Moving Averages')
plt.legend()
plt.show()

# Display results of tests
print("Random Walk Test (First Difference):")
print(data['Diff'].describe())

print("\nAugmented Dickey-Fuller Test:")
print(f"ADF Statistic: {adf_test[0]}")
print(f"p-value: {adf_test[1]}")
for key, value in adf_test[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
Random Walk Test (First Difference):
count    1325.000000
mean        0.000000
std         0.000002
min        -0.000015
25%         0.000000
50%         0.000000
75%         0.000000
max         0.000022
Name: Diff, dtype: float64

Augmented Dickey-Fuller Test:
ADF Statistic: -2.6146923071676897
p-value: 0.08999852761165716
Critical Values:
   1%, -3.435378572037035
Critical Values:
   5%, -2.863760700696655
Critical Values:
   10%, -2.56795231450063
In [ ]:
# Montar Google Drive
from google.colab import drive
drive.mount('/content/drive')

# Instalar nbconvert (por si acaso)
!pip install nbconvert

# Ruta y nombre del archivo
notebook_path = "/content/drive/My Drive/Colab Notebooks/act_2_2.ipynb"
output_name = "act_2_2.html"  # Nombre del archivo HTML de salida

# Verificar si el archivo existe
import os
if os.path.exists(notebook_path):
    print(f"El archivo {notebook_path} existe. Procediendo con la conversión...")
else:
    print(f"Error: El archivo {notebook_path} no se encuentra. Verifica la ruta y el nombre.")
    raise FileNotFoundError("Archivo no encontrado. Revisa la ruta.")

# Convertir el notebook a HTML
!jupyter nbconvert --to html "{notebook_path}" --output "{output_name}"
Mounted at /content/drive
Requirement already satisfied: nbconvert in /usr/local/lib/python3.11/dist-packages (7.16.6)
Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (4.13.3)
Requirement already satisfied: bleach!=5.0.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (6.2.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.7.1)
Requirement already satisfied: jinja2>=3.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.5)
Requirement already satisfied: jupyter-core>=4.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.2)
Requirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.3.0)
Requirement already satisfied: markupsafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.0.2)
Requirement already satisfied: mistune<4,>=2.0.3 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.2)
Requirement already satisfied: nbclient>=0.5.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.10.2)
Requirement already satisfied: nbformat>=5.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.10.4)
Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from nbconvert) (24.2)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (1.5.1)
Requirement already satisfied: pygments>=2.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (2.18.0)
Requirement already satisfied: traitlets>=5.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.1)
Requirement already satisfied: webencodings in /usr/local/lib/python3.11/dist-packages (from bleach!=5.0.0->bleach[css]!=5.0.0->nbconvert) (0.5.1)
Requirement already satisfied: tinycss2<1.5,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (1.4.0)
Requirement already satisfied: platformdirs>=2.5 in /usr/local/lib/python3.11/dist-packages (from jupyter-core>=4.7->nbconvert) (4.3.6)
Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.11/dist-packages (from nbclient>=0.5.0->nbconvert) (6.1.12)
Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (2.21.1)
Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (4.23.0)
Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (2.6)
Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (4.12.2)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (25.1.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (2024.10.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.36.2)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.23.1)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (24.0.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (2.8.2)
Requirement already satisfied: tornado>=4.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (6.4.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.1->jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (1.17.0)
El archivo /content/drive/My Drive/Colab Notebooks/act_2_2.ipynb existe. Procediendo con la conversión...
[NbConvertApp] Converting notebook /content/drive/My Drive/Colab Notebooks/act_2_2.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 3 image(s).
[NbConvertApp] Writing 697784 bytes to /content/drive/My Drive/Colab Notebooks/act_2_2.html
In [ ]:
!pip install statsmodels --upgrade
Requirement already satisfied: statsmodels in /usr/local/lib/python3.11/dist-packages (0.14.4)
Requirement already satisfied: numpy<3,>=1.22.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.26.4)
Requirement already satisfied: scipy!=1.9.2,>=1.8 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.13.1)
Requirement already satisfied: pandas!=2.1.0,>=1.4 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (2.2.2)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.0.1)
Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (24.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2025.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas!=2.1.0,>=1.4->statsmodels) (1.17.0)
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns to debug
print("\nColumns in AAPL DataFrame:", list(aapl_df.columns))
print("Columns in MSFT DataFrame:", list(msft_df.columns))

# Extract closing prices (column is named ' Close' with a leading space)
aapl_close = aapl_df[' Close']
msft_close = msft_df[' Close']

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Difference the series if non-stationary
aapl_diff = aapl_close.diff().dropna()
msft_diff = msft_close.diff().dropna()

# Function to find best ARMA model using ARIMA with d=0
def find_best_arma(series, name, max_p=3, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                model = ARIMA(series, order=(p, 0, q))
                results = model.fit()
                if results.aic < best_aic:
                    best_aic = results.aic
                    best_order = (p, q)
            except:
                continue

    print(f"\nBest ARMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")

    # Fit and return best model
    best_model = ARIMA(series, order=(best_order[0], 0, best_order[1])).fit()
    return best_model

# Fit ARMA models
aapl_arma = find_best_arma(aapl_diff, "AAPL")
msft_arma = find_best_arma(msft_diff, "MSFT")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")

    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:  # 95% critical value
            print(f"r = {i}: Cointegration exists at 95% confidence level")
        else:
            print(f"r = {i}: No cointegration at 95% confidence level")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()

# Run cointegration test
cointegration_test(coint_df)

# Plot the series
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL')
plt.plot(msft_close, label='MSFT')
plt.title('AAPL vs MSFT Closing Prices')
plt.legend()
plt.show()

# Plot the differenced series
plt.figure(figsize=(12,6))
plt.plot(aapl_diff, label='AAPL Diff')
plt.plot(msft_diff, label='MSFT Diff')
plt.title('Differenced Series')
plt.legend()
plt.show()
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (5).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (5).xlsx

Columns in AAPL DataFrame: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Columns in MSFT DataFrame: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Unit Root Tests for MSFT:
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
<ipython-input-5-ec017b977b81>:47: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-5-ec017b977b81>:47: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARMA model for AAPL:
Order: (0, 0)
AIC: 6400.01
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
Best ARMA model for MSFT:
Order: (0, 1)
AIC: 7971.27
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
r = 0: No cointegration at 95% confidence level
r = 1: No cointegration at 95% confidence level
No description has been provided for this image
No description has been provided for this image
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices using user-specified column name
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")

    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"r = {i}: Cointegration exists at 95% confidence level")
        else:
            print(f"r = {i}: No cointegration at 95% confidence level")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue

    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models
aapl_order = find_best_arima(aapl_close, "AAPL")
msft_order = find_best_arima(msft_close, "MSFT")

# Fit final ARIMA models
aapl_model = ARIMA(aapl_close, order=aapl_order).fit()
msft_model = ARIMA(msft_close, order=msft_order).fit()

# Forecast next 30 periods
forecast_steps = 30
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Create forecast index (assuming daily data)
last_index = len(aapl_close) - 1
forecast_index = range(last_index + 1, last_index + 1 + forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(forecast_index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(forecast_index, msft_forecast, label='MSFT Forecast', color='green')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Plot detailed forecast with confidence intervals
def plot_forecast(model, series, name, steps=30):
    forecast_obj = model.get_forecast(steps=steps)
    forecast = forecast_obj.predicted_mean
    conf_int = forecast_obj.conf_int()

    forecast_index = range(len(series), len(series) + steps)

    plt.figure(figsize=(12,6))
    plt.plot(series, label=f'{name} Historical')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.fill_between(forecast_index,
                    conf_int.iloc[:, 0],
                    conf_int.iloc[:, 1],
                    color='pink',
                    alpha=0.3,
                    label='95% Confidence Interval')
    plt.title(f'{name} Price Forecast')
    plt.legend()
    plt.show()

# Generate detailed forecast plots
plot_forecast(aapl_model, aapl_close, "AAPL")
plot_forecast(msft_model, msft_close, "MSFT")

# Print forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (6).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (6).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Unit Root Tests for MSFT:
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
r = 0: No cointegration at 95% confidence level
r = 1: No cointegration at 95% confidence level
<ipython-input-6-1840fc480155>:55: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-6-1840fc480155>:55: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL:
Order: (2, 2, 3)
AIC: 6396.34
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
Best ARIMA model for MSFT:
Order: (3, 2, 3)
AIC: 7970.16
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL Forecast Values (next 5 periods):
1320    66.324734
1321    66.663007
1322    66.062213
1323    66.298968
1324    65.988663
Name: predicted_mean, dtype: float64

MSFT Forecast Values (next 5 periods):
1320    150.906327
1321    150.843280
1322    150.616818
1323    150.419737
1324    150.312816
Name: predicted_mean, dtype: float64
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices using user-specified column name
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Cointegration test with interpretation
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")
    print("Interpretation:")
    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"  - r = {i}: Cointegration exists at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) > 95% critical value ({result.cvt[i, 1]:.2f})")
        else:
            print(f"  - r = {i}: No cointegration at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) <= 95% critical value ({result.cvt[i, 1]:.2f})")
    if result.lr1[0] > result.cvt[0, 1]:
        print("Conclusion: AAPL and MSFT are cointegrated - they share a long-run equilibrium relationship")
    else:
        print("Conclusion: No evidence of cointegration between AAPL and MSFT")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model with interpretation
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue

    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    print("Interpretation:")
    print(f"  - p={best_order[0]}: {best_order[0]} autoregressive term(s)")
    print(f"  - d={best_order[1]}: {best_order[1]} difference(s) needed for stationarity")
    print(f"  - q={best_order[2]}: {best_order[2]} moving average term(s)")
    return best_order

# Find and fit best ARIMA models
aapl_order = find_best_arima(aapl_close, "AAPL")
msft_order = find_best_arima(msft_close, "MSFT")

# Fit final ARIMA models
aapl_model = ARIMA(aapl_close, order=aapl_order).fit()
msft_model = ARIMA(msft_close, order=msft_order).fit()

# Forecast next 30 periods
forecast_steps = 30
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Create forecast index
last_index = len(aapl_close) - 1
forecast_index = range(last_index + 1, last_index + 1 + forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(forecast_index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(forecast_index, msft_forecast, label='MSFT Forecast', color='green')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Detailed forecast plot with confidence intervals and interpretation
def plot_forecast(model, series, name, steps=30):
    forecast_obj = model.get_forecast(steps=steps)
    forecast = forecast_obj.predicted_mean
    conf_int = forecast_obj.conf_int()

    forecast_index = range(len(series), len(series) + steps)

    plt.figure(figsize=(12,6))
    plt.plot(series, label=f'{name} Historical')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.fill_between(forecast_index,
                    conf_int.iloc[:, 0],
                    conf_int.iloc[:, 1],
                    color='pink',
                    alpha=0.3,
                    label='95% Confidence Interval')
    plt.title(f'{name} Price Forecast')
    plt.legend()
    plt.show()

    # Forecast interpretation
    last_value = series.iloc[-1]
    mean_forecast = forecast.mean()
    print(f"\nForecast Interpretation for {name}:")
    print(f"Last observed value: {last_value:.2f}")
    print(f"Average forecast value: {mean_forecast:.2f}")
    print(f"Forecast change: {mean_forecast - last_value:.2f}")
    if mean_forecast > last_value:
        print("Trend: Upward forecast trend")
    elif mean_forecast < last_value:
        print("Trend: Downward forecast trend")
    else:
        print("Trend: Flat forecast trend")
    print(f"95% CI range at period {steps}: [{conf_int.iloc[-1, 0]:.2f}, {conf_int.iloc[-1, 1]:.2f}]")

# Generate detailed forecast plots and interpretations
plot_forecast(aapl_model, aapl_close, "AAPL")
plot_forecast(msft_model, msft_close, "MSFT")

# Print forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (7).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (7).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL may be non-stationary

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL is non-stationary

Unit Root Tests for MSFT:
<ipython-input-7-8d5326ea4ab0>:60: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-7-8d5326ea4ab0>:60: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT may be non-stationary

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT is non-stationary

Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
Interpretation:
  - r = 0: No cointegration at 95% confidence level
    Trace statistic (14.05) <= 95% critical value (15.49)
  - r = 1: No cointegration at 95% confidence level
    Trace statistic (1.26) <= 95% critical value (3.84)
Conclusion: No evidence of cointegration between AAPL and MSFT
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL:
Order: (2, 2, 3)
AIC: 6396.34
Interpretation:
  - p=2: 2 autoregressive term(s)
  - d=2: 2 difference(s) needed for stationarity
  - q=3: 3 moving average term(s)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
Best ARIMA model for MSFT:
Order: (3, 2, 3)
AIC: 7970.16
Interpretation:
  - p=3: 3 autoregressive term(s)
  - d=2: 2 difference(s) needed for stationarity
  - q=3: 3 moving average term(s)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
No description has been provided for this image
No description has been provided for this image
Forecast Interpretation for AAPL:
Last observed value: 66.81
Average forecast value: 64.66
Forecast change: -2.15
Trend: Downward forecast trend
95% CI range at period 30: [33.32, 92.49]
No description has been provided for this image
Forecast Interpretation for MSFT:
Last observed value: 151.38
Average forecast value: 148.37
Forecast change: -3.01
Trend: Downward forecast trend
95% CI range at period 30: [95.77, 195.66]

AAPL Forecast Values (next 5 periods):
1320    66.324734
1321    66.663007
1322    66.062213
1323    66.298968
1324    65.988663
Name: predicted_mean, dtype: float64

MSFT Forecast Values (next 5 periods):
1320    150.906327
1321    150.843280
1322    150.616818
1323    150.419737
1324    150.312816
Name: predicted_mean, dtype: float64
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Split data into train (90%) and test (10%)
train_size = int(min_length * 0.9)  # 1188 observations
test_size = min_length - train_size  # 132 observations

aapl_train = aapl_close[:train_size]
aapl_test = aapl_close[train_size:]
msft_train = msft_close[:train_size]
msft_test = msft_close[train_size:]

print(f"\nTrain size: {train_size} (90%), Test size: {test_size} (10%)")

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests on training data
unit_root_tests(aapl_train, "AAPL (Train)")
unit_root_tests(msft_train, "MSFT (Train)")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")
    print("Interpretation:")
    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"  - r = {i}: Cointegration exists at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) > 95% critical value ({result.cvt[i, 1]:.2f})")
        else:
            print(f"  - r = {i}: No cointegration at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) <= 95% critical value ({result.cvt[i, 1]:.2f})")
    if result.lr1[0] > result.cvt[0, 1]:
        print("Conclusion: AAPL and MSFT are cointegrated")
    else:
        print("Conclusion: No evidence of cointegration between AAPL and MSFT")

# Prepare data for cointegration (train only)
coint_df = pd.DataFrame({'AAPL': aapl_train, 'MSFT': msft_train}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None
    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue
    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models on training data
aapl_order = find_best_arima(aapl_train, "AAPL (Train)")
msft_order = find_best_arima(msft_train, "MSFT (Train)")
aapl_model = ARIMA(aapl_train, order=aapl_order).fit()
msft_model = ARIMA(msft_train, order=msft_order).fit()

# Forecast for the test period (132 steps)
forecast_steps = len(aapl_test)
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(range(train_size, min_length), aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(range(train_size, min_length), msft_forecast, label='MSFT Forecast', color='green')
plt.axvline(x=train_size, color='gray', linestyle='--', label='Train/Test Split')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Evaluate forecast accuracy
from sklearn.metrics import mean_squared_error
aapl_mse = mean_squared_error(aapl_test, aapl_forecast)
msft_mse = mean_squared_error(msft_test, msft_forecast)
print(f"\nAAPL Test MSE: {aapl_mse:.2f}")
print(f"MSFT Test MSE: {msft_mse:.2f}")
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (8).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (8).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Train size: 1188 (90%), Test size: 132 (10%)

Unit Root Tests for AAPL (Train):
ADF Test:
ADF Statistic: -1.2590
p-value: 0.6477
Critical Values: {'1%': -3.4358710597388042, '5%': -2.863977991064458, '10%': -2.5680680340944337}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Train) may be non-stationary

KPSS Test:
KPSS Statistic: 4.5849
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Train) is non-stationary

Unit Root Tests for MSFT (Train):
ADF Test:
ADF Statistic: -0.7677
p-value: 0.8284
Critical Values: {'1%': -3.435880398285223, '5%': -2.8639821109786747, '10%': -2.5680702282397765}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Train) may be non-stationary

KPSS Test:
KPSS Statistic: 4.4196
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Train) is non-stationary

Johansen Cointegration Test:
Trace statistic: [14.71147122  1.67317433]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
Interpretation:
  - r = 0: No cointegration at 95% confidence level
    Trace statistic (14.71) <= 95% critical value (15.49)
  - r = 1: No cointegration at 95% confidence level
    Trace statistic (1.67) <= 95% critical value (3.84)
Conclusion: No evidence of cointegration between AAPL and MSFT
<ipython-input-8-aaf7fb0c58d9>:66: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-8-aaf7fb0c58d9>:66: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL (Train):
Order: (0, 1, 0)
AIC: 5808.26
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for MSFT (Train):
Order: (2, 1, 2)
AIC: 7170.45
No description has been provided for this image
AAPL Test MSE: 274.89
MSFT Test MSE: 1011.65
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.seasonal import STL
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

# Upload Excel files
print("Please upload AAPL Excel file (e.g., HistoricalPrices-4.xlsx)")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file (e.g., HistoricalPrices-3.xlsx)")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices (e.g., ' Close'): ")

# Extract closing prices
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]  # Asegúrate de que MSFT tenga la misma longitud

# 1. Determine the presence of main trend and seasonality
print("\n1. Determining Trend and Seasonality:")

# Plot original series to check for trend and seasonality
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(msft_close, label='MSFT Historical')
plt.title('AAPL and MSFT Closing Prices (Trend Analysis)')
plt.legend()
plt.show()

# Check for seasonality (STL decomposition, assuming annual periodicity for daily data)
try:
    stl_aapl = STL(aapl_close, period=252)  # 252 trading days ~ 1 year
    result_aapl = stl_aapl.fit()
    stl_msft = STL(msft_close, period=252)
    result_msft = stl_msft.fit()

    # Plot STL decomposition
    plt.figure(figsize=(12,8))
    plt.subplot(4,1,1)
    plt.plot(aapl_close, label='AAPL Original')
    plt.legend()
    plt.subplot(4,1,2)
    plt.plot(result_aapl.trend, label='Trend')
    plt.legend()
    plt.subplot(4,1,3)
    plt.plot(result_aapl.seasonal, label='Seasonal')
    plt.legend()
    plt.subplot(4,1,4)
    plt.plot(result_aapl.resid, label='Residual')
    plt.legend()
    plt.suptitle('STL Decomposition for AAPL')
    plt.tight_layout()
    plt.show()

    plt.figure(figsize=(12,8))
    plt.subplot(4,1,1)
    plt.plot(msft_close, label='MSFT Original')
    plt.legend()
    plt.subplot(4,1,2)
    plt.plot(result_msft.trend, label='Trend')
    plt.legend()
    plt.subplot(4,1,3)
    plt.plot(result_msft.seasonal, label='Seasonal')
    plt.legend()
    plt.subplot(4,1,4)
    plt.plot(result_msft.resid, label='Residual')
    plt.legend()
    plt.suptitle('STL Decomposition for MSFT')
    plt.tight_layout()
    plt.show()

    # Interpret seasonality
    if np.std(result_aapl.seasonal.dropna()) < 0.1 * np.std(aapl_close):
        print("AAPL: No significant seasonality detected (seasonal component is small).")
    else:
        print("AAPL: Possible seasonality detected (check seasonal component in STL plot).")

    if np.std(result_msft.seasonal.dropna()) < 0.1 * np.std(msft_close):
        print("MSFT: No significant seasonality detected (seasonal component is small).")
    else:
        print("MSFT: Possible seasonality detected (check seasonal component in STL plot).")
except:
    print("STL decomposition failed or data length insufficient for seasonality analysis.")
    print("AAPL and MSFT: No significant seasonality assumed based on current analysis.")

# Interpret trend from plots
print("Trend Analysis: Both AAPL and MSFT show a clear trend (non-stationary behavior, likely upward or downward over time, visible in the plots).")

# 2. Determine if the data are stationary
print("\n2. Determining Stationarity:")

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")

    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests on original data
unit_root_tests(aapl_close, "AAPL (Original)")
unit_root_tests(msft_close, "MSFT (Original)")

# 3. Split the data into train (90%) and test (10%)
print("\n3. Splitting Data into Train (90%) and Test (10%):")
train_size = int(min_length * 0.9)  # 1188 observations
test_size = min_length - train_size  # 132 observations

aapl_train = aapl_close[:train_size]
aapl_test = aapl_close[train_size:]
msft_train = msft_close[:train_size]
msft_test = msft_close[train_size:]

print(f"Train size: {train_size} (90%), Test size: {test_size} (10%)")

# 4. Make the data stationary (if needed) with differencing
print("\n4. Making Data Stationary (Differencing if Needed):")

# Diferenciar las series (d=2, como en ARIMA de Códigos 2 y 3)
aapl_diff = aapl_close.diff().diff().dropna()
msft_diff = msft_close.diff().diff().dropna()

# Obtener las longitudes después de las diferencias
diff_min_length = min(len(aapl_diff), len(msft_diff))
diff_train_size = int(diff_min_length * 0.9)
diff_test_size = diff_min_length - diff_train_size

# Diferenciar train/test sets usando las nuevas longitudes
aapl_train_diff = aapl_diff[:diff_train_size]
aapl_test_diff = aapl_diff[diff_train_size:]
msft_train_diff = msft_diff[:diff_train_size]
msft_test_diff = msft_diff[diff_train_size:]

print(f"Differenced Train size: {diff_train_size} (90%), Differenced Test size: {diff_test_size} (10%)")

# Verificar estacionariedad en datos diferenciados
unit_root_tests(aapl_train_diff, "AAPL (Differenced Train)")
unit_root_tests(msft_train_diff, "MSFT (Differenced Train)")

# 5. Fit ARIMA models on differenced training data
print("\n5. Fitting ARIMA Models on Differenced Training Data:")

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=0, max_q=3):  # max_d=0 since already differenced
    best_aic = float('inf')
    best_order = None
    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                model = ARIMA(series, order=(p, 0, q))  # d=0 porque ya diferenciamos
                results = model.fit()
                if results.aic < best_aic:
                    best_aic = results.aic
                    best_order = (p, 0, q)
            except:
                continue
    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models on differenced train data
aapl_order_diff = find_best_arima(aapl_train_diff, "AAPL (Differenced Train)")
msft_order_diff = find_best_arima(msft_train_diff, "MSFT (Differenced Train)")
aapl_model_diff = ARIMA(aapl_train_diff, order=aapl_order_diff).fit()
msft_model_diff = ARIMA(msft_train_diff, order=msft_order_diff).fit()

# 6. Forecast and evaluate on test set
print("\n6. Forecasting and Evaluating on Test Set:")
forecast_steps = len(aapl_test_diff)

# Forecast differenced series
aapl_forecast_diff = aapl_model_diff.forecast(steps=forecast_steps)
msft_forecast_diff = msft_model_diff.forecast(steps=forecast_steps)

# Revert differencing to get forecasts in original scale (correctly for d=2)
# Initialize forecast series with the last value before test set
aapl_forecast = pd.Series(index=aapl_test.index[:forecast_steps], dtype=float)
msft_forecast = pd.Series(index=msft_test.index[:forecast_steps], dtype=float)

# First value (revert first difference)
last_value_aapl = aapl_close.iloc[train_size - 1]
last_diff_aapl = aapl_close.diff().iloc[train_size - 1]
aapl_forecast.iloc[0] = last_value_aapl + aapl_forecast_diff.iloc[0] + last_diff_aapl

last_value_msft = msft_close.iloc[train_size - 1]
last_diff_msft = msft_close.diff().iloc[train_size - 1]
msft_forecast.iloc[0] = last_value_msft + msft_forecast_diff.iloc[0] + last_diff_msft

# Subsequent values (revert second difference)
for i in range(1, forecast_steps):
    aapl_forecast.iloc[i] = (aapl_forecast.iloc[i-1] + aapl_forecast_diff.iloc[i] +
                            (aapl_forecast_diff.iloc[i-1] if i > 1 else 0))
    msft_forecast.iloc[i] = (msft_forecast.iloc[i-1] + msft_forecast_diff.iloc[i] +
                            (msft_forecast_diff.iloc[i-1] if i > 1 else 0))

# Check for NaN values and drop or fill them
aapl_forecast = aapl_forecast.dropna()
aapl_test = aapl_test.loc[aapl_forecast.index]  # Alinear índices

msft_forecast = msft_forecast.dropna()
msft_test = msft_test.loc[msft_forecast.index]  # Alinear índices

# Evaluate forecast accuracy with MSE on original scale, ensuring no NaN
if len(aapl_test) > 0 and len(aapl_forecast) > 0:
    aapl_mse = mean_squared_error(aapl_test, aapl_forecast)
    print(f"\nAAPL Test MSE (Original Scale): {aapl_mse:.2f}")
else:
    print("\nAAPL: Insufficient data for MSE calculation due to NaN values or misalignment.")

if len(msft_test) > 0 and len(msft_forecast) > 0:
    msft_mse = mean_squared_error(msft_test, msft_forecast)
    print(f"MSFT Test MSE (Original Scale): {msft_mse:.2f}")
else:
    print("\nMSFT: Insufficient data for MSE calculation due to NaN values or misalignment.")

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(aapl_forecast.index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(msft_forecast.index, msft_forecast, label='MSFT Forecast', color='green')
plt.axvline(x=train_size, color='gray', linestyle='--', label='Train/Test Split')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Plot differenced series to confirm stationarity
# Usar índices adecuados para las series diferenciadas
diff_train_indices = range(len(aapl_train_diff))
diff_test_indices = range(diff_train_size, diff_train_size + len(aapl_test_diff))

plt.figure(figsize=(12,6))
plt.plot(diff_train_indices, aapl_train_diff, label='AAPL Differenced (Train)')
plt.plot(diff_test_indices, aapl_test_diff, label='AAPL Differenced (Test)')
plt.plot(diff_train_indices, aapl_model_diff.fittedvalues, label='AAPL Fitted (Differenced)', color='red', linestyle='--')
plt.title('AAPL Differenced Series (Stationary)')
plt.legend()
plt.show()

plt.figure(figsize=(12,6))
plt.plot(diff_train_indices, msft_train_diff, label='MSFT Differenced (Train)')
plt.plot(diff_test_indices, msft_test_diff, label='MSFT Differenced (Test)')
plt.plot(diff_train_indices, msft_model_diff.fittedvalues, label='MSFT Fitted (Differenced)', color='green', linestyle='--')
plt.title('MSFT Differenced Series (Stationary)')
plt.legend()
plt.show()

# Print first 5 forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file (e.g., HistoricalPrices-4.xlsx)
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (12).xlsx
Please upload MSFT Excel file (e.g., HistoricalPrices-3.xlsx)
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (12).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices (e.g., ' Close'):  Close

1. Determining Trend and Seasonality:
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL: Possible seasonality detected (check seasonal component in STL plot).
MSFT: Possible seasonality detected (check seasonal component in STL plot).
Trend Analysis: Both AAPL and MSFT show a clear trend (non-stationary behavior, likely upward or downward over time, visible in the plots).

2. Determining Stationarity:

Unit Root Tests for AAPL (Original):
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Original) may be non-stationary

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Original) is non-stationary

Unit Root Tests for MSFT (Original):
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Original) may be non-stationary

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Original) is non-stationary

3. Splitting Data into Train (90%) and Test (10%):
Train size: 1188 (90%), Test size: 132 (10%)

4. Making Data Stationary (Differencing if Needed):
Differenced Train size: 1186 (90%), Differenced Test size: 132 (10%)

Unit Root Tests for AAPL (Differenced Train):
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
ADF Test:
ADF Statistic: -12.7716
p-value: 0.0000
Critical Values: {'1%': -3.4359803948357723, '5%': -2.8640262259528595, '10%': -2.5680937227063922}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Differenced Train) is stationary

KPSS Test:
KPSS Statistic: 0.2231
p-value: 0.1000
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Differenced Train) may be stationary

Unit Root Tests for MSFT (Differenced Train):
ADF Test:
ADF Statistic: -13.9784
p-value: 0.0000
Critical Values: {'1%': -3.435975551167148, '5%': -2.864024089129731, '10%': -2.568092584685664}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Differenced Train) is stationary

KPSS Test:
KPSS Statistic: 0.2059
p-value: 0.1000
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Differenced Train) may be stationary

5. Fitting ARIMA Models on Differenced Training Data:
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARIMA model for AAPL (Differenced Train):
Order: (0, 0, 1)
AIC: 5813.02
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARIMA model for MSFT (Differenced Train):
Order: (2, 0, 1)
AIC: 7176.07
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
6. Forecasting and Evaluating on Test Set:

AAPL Test MSE (Original Scale): 270.75
MSFT Test MSE (Original Scale): 1061.93
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.
  return get_prediction_index(
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: FutureWarning: No supported index is available. In the next version, calling this method in a model without a supported index will result in an exception.
  return get_prediction_index(
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.
  return get_prediction_index(
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL Forecast Values (next 5 periods):
1188    88.075382
1189    88.075368
1190    88.075340
1191    88.075311
1192    88.075283
dtype: float64

MSFT Forecast Values (next 5 periods):
1188    196.239608
1189    195.950553
1190    196.341077
1191    197.011519
1192    196.961891
dtype: float64
In [ ]:
!pip install statsmodels --upgrade
Requirement already satisfied: statsmodels in /usr/local/lib/python3.11/dist-packages (0.14.4)
Requirement already satisfied: numpy<3,>=1.22.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.26.4)
Requirement already satisfied: scipy!=1.9.2,>=1.8 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.13.1)
Requirement already satisfied: pandas!=2.1.0,>=1.4 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (2.2.2)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.0.1)
Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (24.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas!=2.1.0,>=1.4->statsmodels) (2025.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas!=2.1.0,>=1.4->statsmodels) (1.17.0)
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns to debug
print("\nColumns in AAPL DataFrame:", list(aapl_df.columns))
print("Columns in MSFT DataFrame:", list(msft_df.columns))

# Extract closing prices (column is named ' Close' with a leading space)
aapl_close = aapl_df[' Close']
msft_close = msft_df[' Close']

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Difference the series if non-stationary
aapl_diff = aapl_close.diff().dropna()
msft_diff = msft_close.diff().dropna()

# Function to find best ARMA model using ARIMA with d=0
def find_best_arma(series, name, max_p=3, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                model = ARIMA(series, order=(p, 0, q))
                results = model.fit()
                if results.aic < best_aic:
                    best_aic = results.aic
                    best_order = (p, q)
            except:
                continue

    print(f"\nBest ARMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")

    # Fit and return best model
    best_model = ARIMA(series, order=(best_order[0], 0, best_order[1])).fit()
    return best_model

# Fit ARMA models
aapl_arma = find_best_arma(aapl_diff, "AAPL")
msft_arma = find_best_arma(msft_diff, "MSFT")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")

    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:  # 95% critical value
            print(f"r = {i}: Cointegration exists at 95% confidence level")
        else:
            print(f"r = {i}: No cointegration at 95% confidence level")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()

# Run cointegration test
cointegration_test(coint_df)

# Plot the series
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL')
plt.plot(msft_close, label='MSFT')
plt.title('AAPL vs MSFT Closing Prices')
plt.legend()
plt.show()

# Plot the differenced series
plt.figure(figsize=(12,6))
plt.plot(aapl_diff, label='AAPL Diff')
plt.plot(msft_diff, label='MSFT Diff')
plt.title('Differenced Series')
plt.legend()
plt.show()
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (5).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (5).xlsx

Columns in AAPL DataFrame: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Columns in MSFT DataFrame: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Unit Root Tests for MSFT:
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
<ipython-input-5-ec017b977b81>:47: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-5-ec017b977b81>:47: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARMA model for AAPL:
Order: (0, 0)
AIC: 6400.01
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
Best ARMA model for MSFT:
Order: (0, 1)
AIC: 7971.27
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
r = 0: No cointegration at 95% confidence level
r = 1: No cointegration at 95% confidence level
No description has been provided for this image
No description has been provided for this image
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices using user-specified column name
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")

    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"r = {i}: Cointegration exists at 95% confidence level")
        else:
            print(f"r = {i}: No cointegration at 95% confidence level")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue

    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models
aapl_order = find_best_arima(aapl_close, "AAPL")
msft_order = find_best_arima(msft_close, "MSFT")

# Fit final ARIMA models
aapl_model = ARIMA(aapl_close, order=aapl_order).fit()
msft_model = ARIMA(msft_close, order=msft_order).fit()

# Forecast next 30 periods
forecast_steps = 30
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Create forecast index (assuming daily data)
last_index = len(aapl_close) - 1
forecast_index = range(last_index + 1, last_index + 1 + forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(forecast_index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(forecast_index, msft_forecast, label='MSFT Forecast', color='green')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Plot detailed forecast with confidence intervals
def plot_forecast(model, series, name, steps=30):
    forecast_obj = model.get_forecast(steps=steps)
    forecast = forecast_obj.predicted_mean
    conf_int = forecast_obj.conf_int()

    forecast_index = range(len(series), len(series) + steps)

    plt.figure(figsize=(12,6))
    plt.plot(series, label=f'{name} Historical')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.fill_between(forecast_index,
                    conf_int.iloc[:, 0],
                    conf_int.iloc[:, 1],
                    color='pink',
                    alpha=0.3,
                    label='95% Confidence Interval')
    plt.title(f'{name} Price Forecast')
    plt.legend()
    plt.show()

# Generate detailed forecast plots
plot_forecast(aapl_model, aapl_close, "AAPL")
plot_forecast(msft_model, msft_close, "MSFT")

# Print forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (6).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (6).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Unit Root Tests for MSFT:
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}

Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
r = 0: No cointegration at 95% confidence level
r = 1: No cointegration at 95% confidence level
<ipython-input-6-1840fc480155>:55: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-6-1840fc480155>:55: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL:
Order: (2, 2, 3)
AIC: 6396.34
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
Best ARIMA model for MSFT:
Order: (3, 2, 3)
AIC: 7970.16
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL Forecast Values (next 5 periods):
1320    66.324734
1321    66.663007
1322    66.062213
1323    66.298968
1324    65.988663
Name: predicted_mean, dtype: float64

MSFT Forecast Values (next 5 periods):
1320    150.906327
1321    150.843280
1322    150.616818
1323    150.419737
1324    150.312816
Name: predicted_mean, dtype: float64
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices using user-specified column name
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")

    # ADF Test
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")

    # KPSS Test
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests
unit_root_tests(aapl_close, "AAPL")
unit_root_tests(msft_close, "MSFT")

# Cointegration test with interpretation
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")
    print("Interpretation:")
    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"  - r = {i}: Cointegration exists at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) > 95% critical value ({result.cvt[i, 1]:.2f})")
        else:
            print(f"  - r = {i}: No cointegration at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) <= 95% critical value ({result.cvt[i, 1]:.2f})")
    if result.lr1[0] > result.cvt[0, 1]:
        print("Conclusion: AAPL and MSFT are cointegrated - they share a long-run equilibrium relationship")
    else:
        print("Conclusion: No evidence of cointegration between AAPL and MSFT")

# Prepare data for cointegration
coint_df = pd.DataFrame({
    'AAPL': aapl_close,
    'MSFT': msft_close
}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model with interpretation
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None

    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue

    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    print("Interpretation:")
    print(f"  - p={best_order[0]}: {best_order[0]} autoregressive term(s)")
    print(f"  - d={best_order[1]}: {best_order[1]} difference(s) needed for stationarity")
    print(f"  - q={best_order[2]}: {best_order[2]} moving average term(s)")
    return best_order

# Find and fit best ARIMA models
aapl_order = find_best_arima(aapl_close, "AAPL")
msft_order = find_best_arima(msft_close, "MSFT")

# Fit final ARIMA models
aapl_model = ARIMA(aapl_close, order=aapl_order).fit()
msft_model = ARIMA(msft_close, order=msft_order).fit()

# Forecast next 30 periods
forecast_steps = 30
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Create forecast index
last_index = len(aapl_close) - 1
forecast_index = range(last_index + 1, last_index + 1 + forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(forecast_index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(forecast_index, msft_forecast, label='MSFT Forecast', color='green')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Detailed forecast plot with confidence intervals and interpretation
def plot_forecast(model, series, name, steps=30):
    forecast_obj = model.get_forecast(steps=steps)
    forecast = forecast_obj.predicted_mean
    conf_int = forecast_obj.conf_int()

    forecast_index = range(len(series), len(series) + steps)

    plt.figure(figsize=(12,6))
    plt.plot(series, label=f'{name} Historical')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.fill_between(forecast_index,
                    conf_int.iloc[:, 0],
                    conf_int.iloc[:, 1],
                    color='pink',
                    alpha=0.3,
                    label='95% Confidence Interval')
    plt.title(f'{name} Price Forecast')
    plt.legend()
    plt.show()

    # Forecast interpretation
    last_value = series.iloc[-1]
    mean_forecast = forecast.mean()
    print(f"\nForecast Interpretation for {name}:")
    print(f"Last observed value: {last_value:.2f}")
    print(f"Average forecast value: {mean_forecast:.2f}")
    print(f"Forecast change: {mean_forecast - last_value:.2f}")
    if mean_forecast > last_value:
        print("Trend: Upward forecast trend")
    elif mean_forecast < last_value:
        print("Trend: Downward forecast trend")
    else:
        print("Trend: Flat forecast trend")
    print(f"95% CI range at period {steps}: [{conf_int.iloc[-1, 0]:.2f}, {conf_int.iloc[-1, 1]:.2f}]")

# Generate detailed forecast plots and interpretations
plot_forecast(aapl_model, aapl_close, "AAPL")
plot_forecast(msft_model, msft_close, "MSFT")

# Print forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (7).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (7).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Unit Root Tests for AAPL:
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL may be non-stationary

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL is non-stationary

Unit Root Tests for MSFT:
<ipython-input-7-8d5326ea4ab0>:60: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-7-8d5326ea4ab0>:60: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT may be non-stationary

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT is non-stationary

Johansen Cointegration Test:
Trace statistic: [14.05123397  1.26121423]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
Interpretation:
  - r = 0: No cointegration at 95% confidence level
    Trace statistic (14.05) <= 95% critical value (15.49)
  - r = 1: No cointegration at 95% confidence level
    Trace statistic (1.26) <= 95% critical value (3.84)
Conclusion: No evidence of cointegration between AAPL and MSFT
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL:
Order: (2, 2, 3)
AIC: 6396.34
Interpretation:
  - p=2: 2 autoregressive term(s)
  - d=2: 2 difference(s) needed for stationarity
  - q=3: 3 moving average term(s)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
Best ARIMA model for MSFT:
Order: (3, 2, 3)
AIC: 7970.16
Interpretation:
  - p=3: 3 autoregressive term(s)
  - d=2: 2 difference(s) needed for stationarity
  - q=3: 3 moving average term(s)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
No description has been provided for this image
No description has been provided for this image
Forecast Interpretation for AAPL:
Last observed value: 66.81
Average forecast value: 64.66
Forecast change: -2.15
Trend: Downward forecast trend
95% CI range at period 30: [33.32, 92.49]
No description has been provided for this image
Forecast Interpretation for MSFT:
Last observed value: 151.38
Average forecast value: 148.37
Forecast change: -3.01
Trend: Downward forecast trend
95% CI range at period 30: [95.77, 195.66]

AAPL Forecast Values (next 5 periods):
1320    66.324734
1321    66.663007
1322    66.062213
1323    66.298968
1324    65.988663
Name: predicted_mean, dtype: float64

MSFT Forecast Values (next 5 periods):
1320    150.906327
1321    150.843280
1322    150.616818
1323    150.419737
1324    150.312816
Name: predicted_mean, dtype: float64
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
import matplotlib.pyplot as plt

# Upload Excel files
print("Please upload AAPL Excel file")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)

print("Please upload MSFT Excel file")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices: ")

# Extract closing prices
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]

# Split data into train (90%) and test (10%)
train_size = int(min_length * 0.9)  # 1188 observations
test_size = min_length - train_size  # 132 observations

aapl_train = aapl_close[:train_size]
aapl_test = aapl_close[train_size:]
msft_train = msft_close[:train_size]
msft_test = msft_close[train_size:]

print(f"\nTrain size: {train_size} (90%), Test size: {test_size} (10%)")

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")
    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests on training data
unit_root_tests(aapl_train, "AAPL (Train)")
unit_root_tests(msft_train, "MSFT (Train)")

# Cointegration test
def cointegration_test(df):
    result = coint_johansen(df, det_order=0, k_ar_diff=1)
    print("\nJohansen Cointegration Test:")
    print(f"Trace statistic: {result.lr1}")
    print(f"Critical values (90%, 95%, 99%): {result.cvt}")
    print("Interpretation:")
    for i in range(len(result.lr1)):
        if result.lr1[i] > result.cvt[i, 1]:
            print(f"  - r = {i}: Cointegration exists at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) > 95% critical value ({result.cvt[i, 1]:.2f})")
        else:
            print(f"  - r = {i}: No cointegration at 95% confidence level")
            print(f"    Trace statistic ({result.lr1[i]:.2f}) <= 95% critical value ({result.cvt[i, 1]:.2f})")
    if result.lr1[0] > result.cvt[0, 1]:
        print("Conclusion: AAPL and MSFT are cointegrated")
    else:
        print("Conclusion: No evidence of cointegration between AAPL and MSFT")

# Prepare data for cointegration (train only)
coint_df = pd.DataFrame({'AAPL': aapl_train, 'MSFT': msft_train}).dropna()
cointegration_test(coint_df)

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=2, max_q=3):
    best_aic = float('inf')
    best_order = None
    for p in range(max_p + 1):
        for d in range(max_d + 1):
            for q in range(max_q + 1):
                try:
                    model = ARIMA(series, order=(p, d, q))
                    results = model.fit()
                    if results.aic < best_aic:
                        best_aic = results.aic
                        best_order = (p, d, q)
                except:
                    continue
    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models on training data
aapl_order = find_best_arima(aapl_train, "AAPL (Train)")
msft_order = find_best_arima(msft_train, "MSFT (Train)")
aapl_model = ARIMA(aapl_train, order=aapl_order).fit()
msft_model = ARIMA(msft_train, order=msft_order).fit()

# Forecast for the test period (132 steps)
forecast_steps = len(aapl_test)
aapl_forecast = aapl_model.forecast(steps=forecast_steps)
msft_forecast = msft_model.forecast(steps=forecast_steps)

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(range(train_size, min_length), aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(range(train_size, min_length), msft_forecast, label='MSFT Forecast', color='green')
plt.axvline(x=train_size, color='gray', linestyle='--', label='Train/Test Split')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Evaluate forecast accuracy
from sklearn.metrics import mean_squared_error
aapl_mse = mean_squared_error(aapl_test, aapl_forecast)
msft_mse = mean_squared_error(msft_test, msft_forecast)
print(f"\nAAPL Test MSE: {aapl_mse:.2f}")
print(f"MSFT Test MSE: {msft_mse:.2f}")
Please upload AAPL Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (8).xlsx
Please upload MSFT Excel file
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (8).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices:  Close

Train size: 1188 (90%), Test size: 132 (10%)

Unit Root Tests for AAPL (Train):
ADF Test:
ADF Statistic: -1.2590
p-value: 0.6477
Critical Values: {'1%': -3.4358710597388042, '5%': -2.863977991064458, '10%': -2.5680680340944337}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Train) may be non-stationary

KPSS Test:
KPSS Statistic: 4.5849
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Train) is non-stationary

Unit Root Tests for MSFT (Train):
ADF Test:
ADF Statistic: -0.7677
p-value: 0.8284
Critical Values: {'1%': -3.435880398285223, '5%': -2.8639821109786747, '10%': -2.5680702282397765}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Train) may be non-stationary

KPSS Test:
KPSS Statistic: 4.4196
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Train) is non-stationary

Johansen Cointegration Test:
Trace statistic: [14.71147122  1.67317433]
Critical values (90%, 95%, 99%): [[13.4294 15.4943 19.9349]
 [ 2.7055  3.8415  6.6349]]
Interpretation:
  - r = 0: No cointegration at 95% confidence level
    Trace statistic (14.71) <= 95% critical value (15.49)
  - r = 1: No cointegration at 95% confidence level
    Trace statistic (1.67) <= 95% critical value (3.84)
Conclusion: No evidence of cointegration between AAPL and MSFT
<ipython-input-8-aaf7fb0c58d9>:66: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-8-aaf7fb0c58d9>:66: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for AAPL (Train):
Order: (0, 1, 0)
AIC: 5808.26
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
Best ARIMA model for MSFT (Train):
Order: (2, 1, 2)
AIC: 7170.45
No description has been provided for this image
AAPL Test MSE: 274.89
MSFT Test MSE: 1011.65
In [ ]:
# Import required libraries
import pandas as pd
import numpy as np
from google.colab import files
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.seasonal import STL
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error

# Upload Excel files
print("Please upload AAPL Excel file (e.g., HistoricalPrices-4.xlsx)")
aapl_upload = files.upload()
aapl_filename = list(aapl_upload.keys())[0]
aapl_df = pd.read_excel(aapl_filename, header=1)  # Header en fila 1 (segunda fila)

print("Please upload MSFT Excel file (e.g., HistoricalPrices-3.xlsx)")
msft_upload = files.upload()
msft_filename = list(msft_upload.keys())[0]
msft_df = pd.read_excel(msft_filename, header=1)  # Header en fila 1 (segunda fila)

# Print available columns
print("\nAAPL DataFrame columns:", list(aapl_df.columns))
print("MSFT DataFrame columns:", list(msft_df.columns))

# Ask user for the correct column name
close_column = input("Please enter the column name containing closing prices (e.g., ' Close'): ")

# Extract closing prices
try:
    aapl_close = aapl_df[close_column]
    msft_close = msft_df[close_column]
except KeyError:
    print(f"Error: Column '{close_column}' not found in one or both DataFrames")
    print("Please check the column names and try again")
    raise

# Ensure both series have the same length
min_length = min(len(aapl_close), len(msft_close))
aapl_close = aapl_close[:min_length]
msft_close = msft_close[:min_length]  # Asegúrate de que MSFT tenga la misma longitud

# 1. Determine the presence of main trend and seasonality
print("\n1. Determining Trend and Seasonality:")

# Plot original series to check for trend and seasonality
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(msft_close, label='MSFT Historical')
plt.title('AAPL and MSFT Closing Prices (Trend Analysis)')
plt.legend()
plt.show()

# Check for seasonality (STL decomposition, assuming annual periodicity for daily data)
try:
    stl_aapl = STL(aapl_close, period=252)  # 252 trading days ~ 1 year
    result_aapl = stl_aapl.fit()
    stl_msft = STL(msft_close, period=252)
    result_msft = stl_msft.fit()

    # Plot STL decomposition
    plt.figure(figsize=(12,8))
    plt.subplot(4,1,1)
    plt.plot(aapl_close, label='AAPL Original')
    plt.legend()
    plt.subplot(4,1,2)
    plt.plot(result_aapl.trend, label='Trend')
    plt.legend()
    plt.subplot(4,1,3)
    plt.plot(result_aapl.seasonal, label='Seasonal')
    plt.legend()
    plt.subplot(4,1,4)
    plt.plot(result_aapl.resid, label='Residual')
    plt.legend()
    plt.suptitle('STL Decomposition for AAPL')
    plt.tight_layout()
    plt.show()

    plt.figure(figsize=(12,8))
    plt.subplot(4,1,1)
    plt.plot(msft_close, label='MSFT Original')
    plt.legend()
    plt.subplot(4,1,2)
    plt.plot(result_msft.trend, label='Trend')
    plt.legend()
    plt.subplot(4,1,3)
    plt.plot(result_msft.seasonal, label='Seasonal')
    plt.legend()
    plt.subplot(4,1,4)
    plt.plot(result_msft.resid, label='Residual')
    plt.legend()
    plt.suptitle('STL Decomposition for MSFT')
    plt.tight_layout()
    plt.show()

    # Interpret seasonality
    if np.std(result_aapl.seasonal.dropna()) < 0.1 * np.std(aapl_close):
        print("AAPL: No significant seasonality detected (seasonal component is small).")
    else:
        print("AAPL: Possible seasonality detected (check seasonal component in STL plot).")

    if np.std(result_msft.seasonal.dropna()) < 0.1 * np.std(msft_close):
        print("MSFT: No significant seasonality detected (seasonal component is small).")
    else:
        print("MSFT: Possible seasonality detected (check seasonal component in STL plot).")
except:
    print("STL decomposition failed or data length insufficient for seasonality analysis.")
    print("AAPL and MSFT: No significant seasonality assumed based on current analysis.")

# Interpret trend from plots
print("Trend Analysis: Both AAPL and MSFT show a clear trend (non-stationary behavior, likely upward or downward over time, visible in the plots).")

# 2. Determine if the data are stationary
print("\n2. Determining Stationarity:")

# Function for unit root tests with interpretation
def unit_root_tests(series, name):
    print(f"\nUnit Root Tests for {name}:")
    adf_result = adfuller(series)
    print("ADF Test:")
    print(f'ADF Statistic: {adf_result[0]:.4f}')
    print(f'p-value: {adf_result[1]:.4f}')
    print(f'Critical Values: {adf_result[4]}')
    print("Interpretation:")
    if adf_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be non-stationary")

    kpss_result = kpss(series)
    print("\nKPSS Test:")
    print(f'KPSS Statistic: {kpss_result[0]:.4f}')
    print(f'p-value: {kpss_result[1]:.4f}')
    print(f'Critical Values: {kpss_result[3]}')
    print("Interpretation:")
    if kpss_result[1] < 0.05:
        print(f"  - p-value < 0.05: Reject null hypothesis - {name} is non-stationary")
    else:
        print(f"  - p-value >= 0.05: Fail to reject null - {name} may be stationary")

# Perform unit root tests on original data
unit_root_tests(aapl_close, "AAPL (Original)")
unit_root_tests(msft_close, "MSFT (Original)")

# 3. Split the data into train (90%) and test (10%)
print("\n3. Splitting Data into Train (90%) and Test (10%):")
train_size = int(min_length * 0.9)  # 1188 observations
test_size = min_length - train_size  # 132 observations

aapl_train = aapl_close[:train_size]
aapl_test = aapl_close[train_size:]
msft_train = msft_close[:train_size]
msft_test = msft_close[train_size:]

print(f"Train size: {train_size} (90%), Test size: {test_size} (10%)")

# 4. Make the data stationary (if needed) with differencing
print("\n4. Making Data Stationary (Differencing if Needed):")

# Diferenciar las series (d=2, como en ARIMA de Códigos 2 y 3)
aapl_diff = aapl_close.diff().diff().dropna()
msft_diff = msft_close.diff().diff().dropna()

# Obtener las longitudes después de las diferencias
diff_min_length = min(len(aapl_diff), len(msft_diff))
diff_train_size = int(diff_min_length * 0.9)
diff_test_size = diff_min_length - diff_train_size

# Diferenciar train/test sets usando las nuevas longitudes
aapl_train_diff = aapl_diff[:diff_train_size]
aapl_test_diff = aapl_diff[diff_train_size:]
msft_train_diff = msft_diff[:diff_train_size]
msft_test_diff = msft_diff[diff_train_size:]

print(f"Differenced Train size: {diff_train_size} (90%), Differenced Test size: {diff_test_size} (10%)")

# Verificar estacionariedad en datos diferenciados
unit_root_tests(aapl_train_diff, "AAPL (Differenced Train)")
unit_root_tests(msft_train_diff, "MSFT (Differenced Train)")

# 5. Fit ARIMA models on differenced training data
print("\n5. Fitting ARIMA Models on Differenced Training Data:")

# Function to find best ARIMA model
def find_best_arima(series, name, max_p=3, max_d=0, max_q=3):  # max_d=0 since already differenced
    best_aic = float('inf')
    best_order = None
    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                model = ARIMA(series, order=(p, 0, q))  # d=0 porque ya diferenciamos
                results = model.fit()
                if results.aic < best_aic:
                    best_aic = results.aic
                    best_order = (p, 0, q)
            except:
                continue
    print(f"\nBest ARIMA model for {name}:")
    print(f"Order: {best_order}")
    print(f"AIC: {best_aic:.2f}")
    return best_order

# Find and fit best ARIMA models on differenced train data
aapl_order_diff = find_best_arima(aapl_train_diff, "AAPL (Differenced Train)")
msft_order_diff = find_best_arima(msft_train_diff, "MSFT (Differenced Train)")
aapl_model_diff = ARIMA(aapl_train_diff, order=aapl_order_diff).fit()
msft_model_diff = ARIMA(msft_train_diff, order=msft_order_diff).fit()

# 6. Forecast and evaluate on test set
print("\n6. Forecasting and Evaluating on Test Set:")
forecast_steps = len(aapl_test_diff)

# Forecast differenced series
aapl_forecast_diff = aapl_model_diff.forecast(steps=forecast_steps)
msft_forecast_diff = msft_model_diff.forecast(steps=forecast_steps)

# Revert differencing to get forecasts in original scale (correctly for d=2)
# Initialize forecast series with the last value before test set
aapl_forecast = pd.Series(index=aapl_test.index[:forecast_steps], dtype=float)
msft_forecast = pd.Series(index=msft_test.index[:forecast_steps], dtype=float)

# First value (revert first difference)
last_value_aapl = aapl_close.iloc[train_size - 1]
last_diff_aapl = aapl_close.diff().iloc[train_size - 1]
aapl_forecast.iloc[0] = last_value_aapl + aapl_forecast_diff.iloc[0] + last_diff_aapl

last_value_msft = msft_close.iloc[train_size - 1]
last_diff_msft = msft_close.diff().iloc[train_size - 1]
msft_forecast.iloc[0] = last_value_msft + msft_forecast_diff.iloc[0] + last_diff_msft

# Subsequent values (revert second difference)
for i in range(1, forecast_steps):
    aapl_forecast.iloc[i] = (aapl_forecast.iloc[i-1] + aapl_forecast_diff.iloc[i] +
                            (aapl_forecast_diff.iloc[i-1] if i > 1 else 0))
    msft_forecast.iloc[i] = (msft_forecast.iloc[i-1] + msft_forecast_diff.iloc[i] +
                            (msft_forecast_diff.iloc[i-1] if i > 1 else 0))

# Check for NaN values and drop or fill them
aapl_forecast = aapl_forecast.dropna()
aapl_test = aapl_test.loc[aapl_forecast.index]  # Alinear índices

msft_forecast = msft_forecast.dropna()
msft_test = msft_test.loc[msft_forecast.index]  # Alinear índices

# Evaluate forecast accuracy with MSE on original scale, ensuring no NaN
if len(aapl_test) > 0 and len(aapl_forecast) > 0:
    aapl_mse = mean_squared_error(aapl_test, aapl_forecast)
    print(f"\nAAPL Test MSE (Original Scale): {aapl_mse:.2f}")
else:
    print("\nAAPL: Insufficient data for MSE calculation due to NaN values or misalignment.")

if len(msft_test) > 0 and len(msft_forecast) > 0:
    msft_mse = mean_squared_error(msft_test, msft_forecast)
    print(f"MSFT Test MSE (Original Scale): {msft_mse:.2f}")
else:
    print("\nMSFT: Insufficient data for MSE calculation due to NaN values or misalignment.")

# Plot original series with forecasts
plt.figure(figsize=(12,6))
plt.plot(aapl_close, label='AAPL Historical')
plt.plot(aapl_forecast.index, aapl_forecast, label='AAPL Forecast', color='red')
plt.plot(msft_close, label='MSFT Historical')
plt.plot(msft_forecast.index, msft_forecast, label='MSFT Forecast', color='green')
plt.axvline(x=train_size, color='gray', linestyle='--', label='Train/Test Split')
plt.title('AAPL and MSFT Closing Prices with Forecasts')
plt.legend()
plt.show()

# Plot differenced series to confirm stationarity
# Usar índices adecuados para las series diferenciadas
diff_train_indices = range(len(aapl_train_diff))
diff_test_indices = range(diff_train_size, diff_train_size + len(aapl_test_diff))

plt.figure(figsize=(12,6))
plt.plot(diff_train_indices, aapl_train_diff, label='AAPL Differenced (Train)')
plt.plot(diff_test_indices, aapl_test_diff, label='AAPL Differenced (Test)')
plt.plot(diff_train_indices, aapl_model_diff.fittedvalues, label='AAPL Fitted (Differenced)', color='red', linestyle='--')
plt.title('AAPL Differenced Series (Stationary)')
plt.legend()
plt.show()

plt.figure(figsize=(12,6))
plt.plot(diff_train_indices, msft_train_diff, label='MSFT Differenced (Train)')
plt.plot(diff_test_indices, msft_test_diff, label='MSFT Differenced (Test)')
plt.plot(diff_train_indices, msft_model_diff.fittedvalues, label='MSFT Fitted (Differenced)', color='green', linestyle='--')
plt.title('MSFT Differenced Series (Stationary)')
plt.legend()
plt.show()

# Print first 5 forecast values
print("\nAAPL Forecast Values (next 5 periods):")
print(aapl_forecast[:5])
print("\nMSFT Forecast Values (next 5 periods):")
print(msft_forecast[:5])
Please upload AAPL Excel file (e.g., HistoricalPrices-4.xlsx)
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-4.xlsx to HistoricalPrices-4 (12).xlsx
Please upload MSFT Excel file (e.g., HistoricalPrices-3.xlsx)
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving HistoricalPrices-3.xlsx to HistoricalPrices-3 (12).xlsx

AAPL DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
MSFT DataFrame columns: ['Date', ' Open', ' High', ' Low', ' Close', ' Volume']
Please enter the column name containing closing prices (e.g., ' Close'):  Close

1. Determining Trend and Seasonality:
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL: Possible seasonality detected (check seasonal component in STL plot).
MSFT: Possible seasonality detected (check seasonal component in STL plot).
Trend Analysis: Both AAPL and MSFT show a clear trend (non-stationary behavior, likely upward or downward over time, visible in the plots).

2. Determining Stationarity:

Unit Root Tests for AAPL (Original):
ADF Test:
ADF Statistic: -1.0119
p-value: 0.7489
Critical Values: {'1%': -3.4353174541055567, '5%': -2.863733732389869, '10%': -2.5679379527245407}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Original) may be non-stationary

KPSS Test:
KPSS Statistic: 5.0423
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Original) is non-stationary

Unit Root Tests for MSFT (Original):
ADF Test:
ADF Statistic: -0.7045
p-value: 0.8456
Critical Values: {'1%': -3.4353516488758684, '5%': -2.8637488209107196, '10%': -2.5679459879960373}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Original) may be non-stationary

KPSS Test:
KPSS Statistic: 4.9191
p-value: 0.0100
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Original) is non-stationary

3. Splitting Data into Train (90%) and Test (10%):
Train size: 1188 (90%), Test size: 132 (10%)

4. Making Data Stationary (Differencing if Needed):
Differenced Train size: 1186 (90%), Differenced Test size: 132 (10%)

Unit Root Tests for AAPL (Differenced Train):
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series)
ADF Test:
ADF Statistic: -12.7716
p-value: 0.0000
Critical Values: {'1%': -3.4359803948357723, '5%': -2.8640262259528595, '10%': -2.5680937227063922}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - AAPL (Differenced Train) is stationary

KPSS Test:
KPSS Statistic: 0.2231
p-value: 0.1000
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value >= 0.05: Fail to reject null - AAPL (Differenced Train) may be stationary

Unit Root Tests for MSFT (Differenced Train):
ADF Test:
ADF Statistic: -13.9784
p-value: 0.0000
Critical Values: {'1%': -3.435975551167148, '5%': -2.864024089129731, '10%': -2.568092584685664}
Interpretation:
  - p-value < 0.05: Reject null hypothesis - MSFT (Differenced Train) is stationary

KPSS Test:
KPSS Statistic: 0.2059
p-value: 0.1000
Critical Values: {'10%': 0.347, '5%': 0.463, '2.5%': 0.574, '1%': 0.739}
Interpretation:
  - p-value >= 0.05: Fail to reject null - MSFT (Differenced Train) may be stationary

5. Fitting ARIMA Models on Differenced Training Data:
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series)
<ipython-input-12-34c81efa34f2>:132: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARIMA model for AAPL (Differenced Train):
Order: (0, 0, 1)
AIC: 5813.02
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
Best ARIMA model for MSFT (Differenced Train):
Order: (2, 0, 1)
AIC: 7176.07
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: An unsupported index was provided. As a result, forecasts cannot be generated. To use the model for forecasting, use one of the supported classes of index.
  self._init_dates(dates, freq)
6. Forecasting and Evaluating on Test Set:

AAPL Test MSE (Original Scale): 270.75
MSFT Test MSE (Original Scale): 1061.93
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.
  return get_prediction_index(
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: FutureWarning: No supported index is available. In the next version, calling this method in a model without a supported index will result in an exception.
  return get_prediction_index(
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:837: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.
  return get_prediction_index(
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
AAPL Forecast Values (next 5 periods):
1188    88.075382
1189    88.075368
1190    88.075340
1191    88.075311
1192    88.075283
dtype: float64

MSFT Forecast Values (next 5 periods):
1188    196.239608
1189    195.950553
1190    196.341077
1191    197.011519
1192    196.961891
dtype: float64
In [ ]:
# Import required libraries
from google.colab import drive
import os
import subprocess

# Montar Google Drive
drive.mount('/content/drive')

# Explorar Google Drive para encontrar el archivo
print("Explorando Google Drive para encontrar el notebook...")

# Lista los directorios principales en My Drive
print("\nDirectorios principales en My Drive:")
!ls "/content/drive/My Drive/"

# Lista los directorios en '6to semestre: feb-jun 2025' (si existe)
semestre_dir = "/content/drive/My Drive/6to semestre: feb-jun 2025/"
if os.path.exists(semestre_dir):
    print("\nContenido de '6to semestre: feb-jun 2025':")
    !ls "{semestre_dir}"
else:
    print(f"\nEl directorio '{semestre_dir}' no existe. Verifica el nombre exacto.")

# Lista los directorios en 'seises de tiempo' (si existe)
tiempo_dir = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/"
if os.path.exists(tiempo_dir):
    print("\nContenido de 'seises de tiempo':")
    !ls "{tiempo_dir}"
else:
    print(f"\nEl directorio '{tiempo_dir}' no existe. Verifica el nombre exacto.")

# Pide al usuario que ingrese la ruta exacta del notebook
notebook_path = input("\nPor favor, ingresa la ruta exacta del notebook (e.g., /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/A00838009_of_COST_WMT_XLS_ARIMA.ipynb): ")

# Verificar si el archivo existe
if not os.path.exists(notebook_path):
    print(f"Error: El archivo {notebook_path} no se encontró en Google Drive.")
    print("Por favor, verifica la ruta y asegúrate de que el archivo esté en la ubicación correcta.")
else:
    # Crear la carpeta de salida si no existe
    output_dir = os.path.dirname(notebook_path)
    os.makedirs(output_dir, exist_ok=True)

    # Nombre del archivo HTML de salida (mismo nombre que el notebook pero con extensión .html)
    html_output = os.path.join(output_dir, os.path.splitext(os.path.basename(notebook_path))[0] + '.html')

    # Usar nbconvert para convertir el notebook a HTML
    try:
        # Comando para convertir el notebook a HTML usando nbconvert
        command = f"jupyter nbconvert --to html '{notebook_path}' --output '{html_output}'"
        subprocess.run(command, shell=True, check=True)
        print(f"Notebook convertido exitosamente a HTML. El archivo se guardó como: {html_output}")
    except subprocess.CalledProcessError as e:
        print(f"Error al convertir el notebook a HTML: {e}")
        print("Asegúrate de que Jupyter nbconvert esté instalado en tu entorno. Puedes instalarlo con:")
        print("!pip install nbconvert")

# Si necesitas instalar nbconvert (descomenta y corre si es necesario)
# !pip install nbconvert
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
Explorando Google Drive para encontrar el notebook...

Directorios principales en My Drive:
'23 swap contract.gdoc'
'5to semestre'
'6to semestre: feb-jun 2025'
 AC1_SP1_Team7.gdoc
'AC3_SP1_Team 7.gdoc'
'act 2 drawdown solutions.gdoc'
'act 7_team 3.gdoc'
'Actividad 2 M2.gdoc'
'Actividad M1_1.3 equipo c.gdoc'
'Actividad M1_3.1 equipo c.gdoc'
'Actividad M1_3.1: estrategias disruptivas.gdoc'
'Actividad M2_2.3 equipo c.gdoc'
'Actividad supervisada: Elaboración del cronograma #4.gdoc'
'Activity 10 Tesla in Nuevo Leon.gdoc'
'Activity 22. GHG Emissions.gdoc'
'Activity 25.gdoc'
'Activity 2: Defining strategic _Team 3.gdoc'
'Activity 3B: Mind map and Mission and Vision of a organization.gdoc'
'Activity 4.gdoc'
'Activity 5_team 3.gdoc'
'AE 2: Estudio de Caso - Elementos para el diseño de una cultura organizacional.gdoc'
'AE4: Ecosistema para la transformación.gdoc'
'Assignment: gathering data.gdoc'
'Avance de reto 1.gdoc'
'Avance de reto 3: Creación de valor compartido.gdoc'
'Avance reto semana 3.gdoc'
'Bibliographic record.docx'
'Bob services.gsite'
'Bob Services.gsite'
'Captura de pantalla (127).png'
'Captura de pantalla (130).png'
'Captura de pantalla (229).png'
'Captura de pantalla (350).png'
'Captura de pantalla (359).png'
'Captura de pantalla (363).png'
'Captura de pantalla (423).png'
'Caso 2.gdoc'
'Caso integrador'$'\n''El 1 de Enero del 2025, los activ....gsheet'
'Caso sesión 4.gdoc'
 CA_SP1_Team7.gdoc
 Classroom
'Colab Notebooks'
'como puedo llenar un tablero de validación?.gsheet'
'Confirmación de equipo .gform'
'Confirmación Listado Proyectos Solidarios MTY_Confirmación Inscripciones INV24_Tabla.csv'
'Confirmación Listado Proyectos Solidarios MTY_Confirmación Inscripciones INV24_Tabla.gsheet'
'Copia de Cálculo Promedio | Liderazgo.gsheet'
'Copia de GUÍA DE ESTUDIO.gdoc'
'Copia de Guía Quiz Sushi Rainbow .gdoc'
'Copia de HUELLA LSH - Rúbrica FJ23.gdoc'
'Copia de Ikigai-Semana Tec.gjam'
'Copia de Ikigai-Semana Tec.pdf'
'Copia de Respuestas Ejercicios de Repaso.xlsx'
'Copia de Value Chain by Slidesgo.gslides'
'Cuestionario vacío (Respuestas).gsheet'
'CV federico vinatier (1).pdf'
'CV federico vinatier (2).pdf'
'CV federico vinatier (3).pdf'
'CV federico vinatier.pdf'
 Diamond_4Team.gdoc
'Documento integrador_eq#4.gdoc'
'Documento sin título.gdoc'
 DS_STORY_CANVAS.pptx
'EDT equipo 4.gdoc'
 Elefante_FedericoVinatier_semana12.jpg
'entregable 2 reto eq#4.gdoc'
'Entrevista 1.mov'
'Entrevista 2.mov'
'entrevista trabajo de campo.m4a'
'Equipo 3_entregable 2.gsheet'
'Equipo3_Mapeo Entregable2.gsheet'
'Equipo4_Documentación del plan de ejecución del proyecto.gdoc'
'#Equipo5_Análisis Financiero.gdoc'
 equipo#5_KPI.docx.gdoc
'EQUIPO VERDE.pdf'
'estos son los cálculos que yo saqué, me ayudas a....gsheet'
'Estructuras simbolicas '
'Evidence 1_Team 4.gdoc'
'evidencia 2.gdoc'
'evidencia taller testigo activo.pdf'
'final evidence.mp4'
"Freddy's interviews"
 guión.gdoc
'guión para presentación.gdoc'
'Hoja de cálculo sin título (1).gsheet'
'Hoja de cálculo sin título.gsheet'
'Huella LSH-Federico Vinatier Villarreal A00838009.pdf'
'Imagen de WhatsApp 2023-09-25 a las 14.17.36.jpg'
'Imagen de WhatsApp 2023-10-08 a las 18.07.14_65b048d5.jpg'
'Imagen de WhatsApp 2023-10-14 a las 16.26.16_7c721139.jpg'
'in-class assignment team #1.gdoc'
'Inscripciones_Club Deportivo de Futból Americano Águilas, A.C. .xlsx'
'invierno 2025'
'Líder con sentido humano'
'Lista tentativa club aguilas.gsheet'
'M1 act 7.gdoc'
'M1. Actividad 2. Análisis Externo.gdoc'
'M2 S3 CCE Edos financieros-1.xlsx'
'M2 task 1.gdoc'
'M2 task 4: SWOT analysis.gdoc'
'M3 A1 eq#4.gdoc'
'M3. Actividad 2. Analisis Horizontal y vertical.gdoc'
'Marketing Plan Presentation team 4.pdf'
'Market validation.gform'
'notas sf grupo bimbo.gdoc'
'Portada Tec.gdoc'
'primer avance de reto eq#4.gdoc'
'Professional Resume (1).pdf'
'Professional Resume.pdf'
'reflexión grupal.gdoc'
'reflexión video 2.mp4'
'Reto-A3. Resumen Ejecutivo.gdoc'
'Reto - AE4: Diagnóstico de potencial de cambio de la empresa.gdoc'
'Reto - AE6: Entrega final al socio formador.gdoc'
'Reto Avance 2.gdoc'
'S5_P2_Equipo5_La primera cita de Riley.gdoc'
'Scavenger Hunt: Análisis e investigación de ODS Y OSF.gdoc'
'TE 3: Ciclos de Vida.gdoc'
'TE 3: Ciclos de Vida.gsheet'
'Team #2_Task 1.gdoc'
'Team 2_Task 2.gdoc'
'Team 2_Task 4.gdoc'
'Team 2_task 5.gdoc'
'Team 3_Activity 6. Strategy Map and KPIs.gdoc'
 Trabajo
'video metaverse experience.mp4'
'video obra pública.mp4'
'Welcome Beware[1].pdf'

Contenido de '6to semestre: feb-jun 2025':
'algoritmos y análisis de datos'  'diagnostico financiero'  'series de tiempo'

Contenido de 'seises de tiempo':
'A00838009_of COST WMT XLS ARIMA.ipynb'   HistoricalPrices-AAPL.xlsx
 act_2_2.ipynb				  HistoricalPrices-MSFT.xlsx
'ejemplo clase 21 feb 25 intro.ipynb'	 'notas series de tiempo.gdoc'

Por favor, ingresa la ruta exacta del notebook (e.g., /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/A00838009_of_COST_WMT_XLS_ARIMA.ipynb): /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/A00838009_of COST WMT XLS ARIMA.ipynb
Notebook convertido exitosamente a HTML. El archivo se guardó como: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/A00838009_of COST WMT XLS ARIMA.html
In [ ]:
# Importar bibliotecas necesarias
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller, kpss, coint
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.arima.model import ARIMA as ARIMA_model
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")

# Crear DataFrame con los datos proporcionados
data = {
    'Fecha_AMZN': ['2025-03-07 11:48:00', '2025-03-07 11:45:00', '2025-03-07 11:42:00', '2025-03-07 11:39:00', '2025-03-07 11:36:00', '2025-03-07 11:33:00', '2025-03-07 11:30:00', '2025-03-07 11:27:00', '2025-03-07 11:24:00', '2025-03-07 11:21:00', '2025-03-07 11:18:00', '2025-03-07 11:15:00', '2025-03-07 11:12:00', '2025-03-07 11:09:00', '2025-03-07 11:06:00', '2025-03-07 11:03:00', '2025-03-07 11:00:00', '2025-03-07 10:57:00', '2025-03-07 10:54:00', '2025-03-07 10:51:00', '2025-03-07 10:48:00', '2025-03-07 10:45:00', '2025-03-07 10:42:00', '2025-03-07 10:39:00', '2025-03-07 10:36:00', '2025-03-07 10:33:00', '2025-03-07 10:30:00', '2025-03-07 10:27:00', '2025-03-07 10:24:00', '2025-03-07 10:21:00', '2025-03-07 10:18:00', '2025-03-07 10:15:00', '2025-03-07 10:12:00', '2025-03-07 10:09:00'],
    'AMZN_Lst_Trd': [195.75, 195.72, 195.565, 195.09, 195.075, 194.63, 194.085, 193.66, 193.575, 193.37, 193.005, 192.86, 193.27, 192.79, 193.08, 193.345, 193.8, 193.68, 193.65, 192.82, 192.95, 193.535, 194.04, 193.71, 193.94, 194.446, 194.2, 194.57, 194.67, 195.415, 195.48, 195.88, 196.2, 196.43],
    'Fecha_TSLA': ['2025-03-07 11:39:00', '2025-03-07 11:36:00', '2025-03-07 11:33:00', '2025-03-07 11:30:00', '2025-03-07 11:27:00', '2025-03-07 11:24:00', '2025-03-07 11:21:00', '2025-03-07 11:18:00', '2025-03-07 11:15:00', '2025-03-07 11:12:00', '2025-03-07 11:09:00', '2025-03-07 11:06:00', '2025-03-07 11:03:00', '2025-03-07 11:00:00', '2025-03-07 10:57:00', '2025-03-07 10:54:00', '2025-03-07 10:51:00', '2025-03-07 10:48:00', '2025-03-07 10:45:00', '2025-03-07 10:42:00', '2025-03-07 10:39:00', '2025-03-07 10:36:00', '2025-03-07 10:33:00', '2025-03-07 10:30:00', '2025-03-07 10:27:00', '2025-03-07 10:24:00', '2025-03-07 10:21:00', '2025-03-07 10:18:00', '2025-03-07 10:15:00', '2025-03-07 10:12:00', '2025-03-07 10:09:00', '2025-03-07 10:06:00', '2025-03-07 10:03:00', '2025-03-07 10:00:00'],
    'TSLA_Lst_Trd': [257.53, 257.33, 256.915, 255.86, 255.58, 254.6809, 254.4, 253.9485, 253.2299, 253.95, 252.71, 253.26, 253.825, 253.665, 252.73, 252.7282, 250.995, 251.94, 253.62, 253.31, 253.27, 253.97, 255.275, 254.9, 254.615, 255.39, 256.975, 256.53, 257.6586, 258.925, 258.63, 259.12, 258.77, 258.035],
    'Fecha_SBUX': ['2025-03-07 11:42:00', '2025-03-07 11:39:00', '2025-03-07 11:36:00', '2025-03-07 11:33:00', '2025-03-07 11:30:00', '2025-03-07 11:27:00', '2025-03-07 11:24:00', '2025-03-07 11:21:00', '2025-03-07 11:18:00', '2025-03-07 11:15:00', '2025-03-07 11:12:00', '2025-03-07 11:09:00', '2025-03-07 11:06:00', '2025-03-07 11:03:00', '2025-03-07 11:00:00', '2025-03-07 10:57:00', '2025-03-07 10:54:00', '2025-03-07 10:51:00', '2025-03-07 10:48:00', '2025-03-07 10:45:00', '2025-03-07 10:42:00', '2025-03-07 10:39:00', '2025-03-07 10:36:00', '2025-03-07 10:33:00', '2025-03-07 10:30:00', '2025-03-07 10:27:00', '2025-03-07 10:24:00', '2025-03-07 10:21:00', '2025-03-07 10:18:00', '2025-03-07 10:15:00', '2025-03-07 10:12:00', '2025-03-07 10:09:00', '2025-03-07 10:06:00', '2025-03-07 10:03:00'],
    'SBUX_Lst_Trd': [104.38, 104.19, 104.13, 103.92, 103.89, 103.92, 103.95, 103.91, 103.98, 103.96, 104.295, 104.01, 104.005, 103.94, 103.995, 104.08, 103.99, 103.75, 103.83, 103.87, 104.03, 104.01, 104.1638, 104.15, 104.06, 104.03, 103.855, 104.18, 104.14, 104.27, 104.43, 104.57, 104.51, 104.555],
    'Fecha_NKE': ['2025-03-07 11:45:00', '2025-03-07 11:42:00', '2025-03-07 11:39:00', '2025-03-07 11:36:00', '2025-03-07 11:33:00', '2025-03-07 11:30:00', '2025-03-07 11:27:00', '2025-03-07 11:24:00', '2025-03-07 11:21:00', '2025-03-07 11:18:00', '2025-03-07 11:15:00', '2025-03-07 11:12:00', '2025-03-07 11:09:00', '2025-03-07 11:06:00', '2025-03-07 11:03:00', '2025-03-07 11:00:00', '2025-03-07 10:57:00', '2025-03-07 10:54:00', '2025-03-07 10:51:00', '2025-03-07 10:48:00', '2025-03-07 10:45:00', '2025-03-07 10:42:00', '2025-03-07 10:39:00', '2025-03-07 10:36:00', '2025-03-07 10:33:00', '2025-03-07 10:30:00', '2025-03-07 10:27:00', '2025-03-07 10:24:00', '2025-03-07 10:21:00', '2025-03-07 10:18:00', '2025-03-07 10:15:00', '2025-03-07 10:12:00', '2025-03-07 10:09:00', '2025-03-07 10:06:00'],
    'NKE_Lst_Trd': [77.835, 77.75, 77.65, 77.64, 77.49, 77.48, 77.475, 77.49, 77.47, 77.37, 77.4, 77.45, 77.34, 77.3316, 77.37, 77.44, 77.345, 77.23, 77.04, 77.085, 77.14, 77.04, 77.25, 77.43, 77.415, 77.465, 77.435, 77.3, 77.42, 77.54, 77.56, 77.585, 77.565, 77.58]
}

# Convertir a DataFrame
df = pd.DataFrame(data)

# Convertir columnas de fechas a datetime
df['Fecha_AMZN'] = pd.to_datetime(df['Fecha_AMZN'])
df['Fecha_TSLA'] = pd.to_datetime(df['Fecha_TSLA'])
df['Fecha_SBUX'] = pd.to_datetime(df['Fecha_SBUX'])
df['Fecha_NKE'] = pd.to_datetime(df['Fecha_NKE'])

# Establecer las fechas como índices
amzn_series = pd.Series(df['AMZN_Lst_Trd'].values, index=df['Fecha_AMZN'], name='AMZN')
tsla_series = pd.Series(df['TSLA_Lst_Trd'].values, index=df['Fecha_TSLA'], name='TSLA')
sbux_series = pd.Series(df['SBUX_Lst_Trd'].values, index=df['Fecha_SBUX'], name='SBUX')
nke_series = pd.Series(df['NKE_Lst_Trd'].values, index=df['Fecha_NKE'], name='NKE')

# Diccionario para manejar todas las series
series_dict = {
    'AMZN': amzn_series,
    'TSLA': tsla_series,
    'SBUX': sbux_series,
    'NKE': nke_series
}

# 1. Graficar las series originales para análisis de tendencia
plt.figure(figsize=(12, 6))
for name, series in series_dict.items():
    plt.plot(series, label=name)
plt.title('Closing Prices (Trend Analysis)')
plt.xlabel('Date')
plt.ylabel('Price')
plt.legend()
plt.show()

# 2. Descomposición STL para evaluar tendencia y estacionalidad
for name, series in series_dict.items():
    stl = STL(series, period=13)  # Periodo tentativo para datos intradía
    result = stl.fit()

    # Graficar la descomposición
    plt.figure(figsize=(10, 8))
    plt.subplot(411)
    plt.plot(series, label='Original')
    plt.legend(loc='upper left')
    plt.subplot(412)
    plt.plot(result.trend, label='Trend')
    plt.legend(loc='upper left')
    plt.subplot(413)
    plt.plot(result.seasonal, label='Seasonal')
    plt.legend(loc='upper left')
    plt.subplot(414)
    plt.plot(result.resid, label='Residual')
    plt.legend(loc='upper left')
    plt.suptitle(f'STL Decomposition for {name}')
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.show()

    # Evaluar si hay estacionalidad significativa
    seasonal_std = np.std(result.seasonal)
    series_std = np.std(series)
    if seasonal_std / series_std < 0.1:
        print(f"{name}: No significant seasonality detected (seasonal std / series std = {seasonal_std / series_std:.4f})")
    else:
        print(f"{name}: Possible seasonality detected (seasonal std / series std = {seasonal_std / series_std:.4f})")

# 3. Pruebas de raíz unitaria (ADF y KPSS)
def adf_test(series, name):
    result = adfuller(series, autolag='AIC')
    print(f'\nADF Test for {name} (Original):')
    print(f'ADF Statistic: {result[0]:.4f}')
    print(f'p-value: {result[1]:.4f}')
    if result[1] <= 0.05:
        print(f'Reject null hypothesis - {name} (Original) is stationary')
    else:
        print(f'Fail to reject null - {name} (Original) may be non-stationary')

def kpss_test(series, name):
    result = kpss(series, regression='c')
    print(f'\nKPSS Test for {name} (Original):')
    print(f'KPSS Statistic: {result[0]:.4f}')
    print(f'p-value: {result[1]:.4f}')
    if result[1] <= 0.05:
        print(f'Reject null hypothesis - {name} (Original) is non-stationary')
    else:
        print(f'Fail to reject null - {name} (Original) may be stationary')

# Aplicar pruebas a cada serie
for name, series in series_dict.items():
    adf_test(series, name)
    kpss_test(series, name)

# 4. Prueba de cointegración entre pares de series
def cointegration_test(series1, series2, name1, name2):
    score, p_value, _ = coint(series1, series2)
    print(f'\nCointegration Test between {name1} and {name2}:')
    print(f'p-value: {p_value:.4f}')
    if p_value < 0.05:
        print(f'Reject null hypothesis - {name1} and {name2} are cointegrated')
    else:
        print(f'Fail to reject null - {name1} and {name2} are not cointegrated')

# Realizar prueba de cointegración para todos los pares posibles
series_list = list(series_dict.items())
for i in range(len(series_list)):
    for j in range(i + 1, len(series_list)):
        name1, series1 = series_list[i]
        name2, series2 = series_list[j]
        # Asegurarnos de que las series tengan el mismo índice
        aligned_series1, aligned_series2 = series1.align(series2, join='inner')
        cointegration_test(aligned_series1, aligned_series2, name1, name2)

# 5. Diferenciar las series para hacerlas estacionarias (si es necesario)
d = 1  # Número de diferenciaciones
diff_series_dict = {}
for name, series in series_dict.items():
    diff_series = series.diff(d).dropna()
    diff_series_dict[name] = diff_series

    # Graficar series diferenciadas
    plt.figure(figsize=(10, 4))
    plt.plot(diff_series)
    plt.title(f'{name} Differenced Series (d={d})')
    plt.xlabel('Date')
    plt.ylabel('Differenced Price')
    plt.show()

    # Pruebas de raíz unitaria en las series diferenciadas
    adf_test(diff_series, f'{name} (Differenced)')
    kpss_test(diff_series, f'{name} (Differenced)')

# 6. Dividir los datos en train y test (90% train, 10% test)
train_test_dict = {}
for name, series in series_dict.items():
    train_size = int(len(series) * 0.9)
    train, test = series[:train_size], series[train_size:]
    train_test_dict[name] = (train, test)

# 7. Ajustar modelos ARMA en datos diferenciados y ARIMA en datos originales
arima_results = {}
for name, (train, test) in train_test_dict.items():
    # Diferenciar datos de entrenamiento
    diff_train = train.diff(d).dropna()

    # Ajustar modelo ARMA (p, q) en datos diferenciados (d=0 porque ya está diferenciado)
    best_aic = float('inf')
    best_order = None
    best_model = None
    for p in range(3):
        for q in range(3):
            try:
                model = ARIMA(diff_train, order=(p, 0, q))
                model_fit = model.fit()
                if model_fit.aic < best_aic:
                    best_aic = model_fit.aic
                    best_order = (p, 0, q)
                    best_model = model_fit
            except:
                continue

    print(f'\nBest ARMA Model for {name} (Differenced): Order={best_order}, AIC={best_aic:.2f}')

    # Pronosticar con ARMA (en escala diferenciada)
    forecast_diff = best_model.forecast(steps=len(test))
    print(f'Forecast (Differenced) for {name} (first 5 periods): {forecast_diff[:5].values}')

    # Revertir la diferenciación para obtener pronósticos en la escala original
    last_value = train.iloc[-1]
    forecast = np.cumsum(np.concatenate([[0], forecast_diff])) + last_value
    forecast = forecast[1:]  # Eliminar el primer valor (0)

    # Evaluar el modelo ARMA
    mse = mean_squared_error(test, forecast)
    print(f'MSE for {name} (ARMA): {mse:.2f}')

    # Ajustar modelo ARIMA en datos originales
    best_aic_arima = float('inf')
    best_order_arima = None
    best_model_arima = None
    for p in range(3):
        for d in range(2):
            for q in range(3):
                try:
                    model = ARIMA(train, order=(p, d, q))
                    model_fit = model.fit()
                    if model_fit.aic < best_aic_arima:
                        best_aic_arima = model_fit.aic
                        best_order_arima = (p, d, q)
                        best_model_arima = model_fit
                except:
                    continue

    print(f'\nBest ARIMA Model for {name}: Order={best_order_arima}, AIC={best_aic_arima:.2f}')

    # Pronosticar con ARIMA
    forecast_arima = best_model_arima.forecast(steps=len(test))
    print(f'Forecast (ARIMA) for {name} (first 5 periods): {forecast_arima[:5].values}')

    # Evaluar el modelo ARIMA
    mse_arima = mean_squared_error(test, forecast_arima)
    print(f'MSE for {name} (ARIMA): {mse_arima:.2f}')

    # Guardar resultados
    arima_results[name] = (best_model_arima, forecast_arima)

    # Graficar pronósticos
    plt.figure(figsize=(12, 6))
    plt.plot(train.index, train, label='Train')
    plt.plot(test.index, test, label='Test', color='green')
    plt.plot(test.index, forecast_arima, label='ARIMA Forecast', color='red')
    plt.title(f'{name} Closing Prices with ARIMA Forecast')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.legend()
    plt.show()
No description has been provided for this image
No description has been provided for this image
AMZN: Possible seasonality detected (seasonal std / series std = 0.4020)
No description has been provided for this image
TSLA: Possible seasonality detected (seasonal std / series std = 0.2688)
No description has been provided for this image
SBUX: Possible seasonality detected (seasonal std / series std = 0.6186)
No description has been provided for this image
NKE: Possible seasonality detected (seasonal std / series std = 0.4479)

ADF Test for AMZN (Original):
ADF Statistic: -0.4724
p-value: 0.8973
Fail to reject null - AMZN (Original) may be non-stationary

KPSS Test for AMZN (Original):
KPSS Statistic: 0.2668
p-value: 0.1000
Fail to reject null - AMZN (Original) may be stationary

ADF Test for TSLA (Original):
ADF Statistic: -0.9675
p-value: 0.7649
Fail to reject null - TSLA (Original) may be non-stationary

KPSS Test for TSLA (Original):
KPSS Statistic: 0.3539
p-value: 0.0970
Fail to reject null - TSLA (Original) may be stationary

ADF Test for SBUX (Original):
ADF Statistic: -1.6361
p-value: 0.4643
Fail to reject null - SBUX (Original) may be non-stationary

KPSS Test for SBUX (Original):
KPSS Statistic: 0.3770
p-value: 0.0871
Fail to reject null - SBUX (Original) may be stationary

ADF Test for NKE (Original):
ADF Statistic: -2.3597
p-value: 0.1534
Fail to reject null - NKE (Original) may be non-stationary

KPSS Test for NKE (Original):
KPSS Statistic: 0.2779
p-value: 0.1000
Fail to reject null - NKE (Original) may be stationary

Cointegration Test between AMZN and TSLA:
p-value: 0.7588
Fail to reject null - AMZN and TSLA are not cointegrated

Cointegration Test between AMZN and SBUX:
p-value: 0.6028
Fail to reject null - AMZN and SBUX are not cointegrated

Cointegration Test between AMZN and NKE:
p-value: 0.9488
Fail to reject null - AMZN and NKE are not cointegrated

Cointegration Test between TSLA and SBUX:
p-value: 0.4285
Fail to reject null - TSLA and SBUX are not cointegrated

Cointegration Test between TSLA and NKE:
p-value: 0.8528
Fail to reject null - TSLA and NKE are not cointegrated

Cointegration Test between SBUX and NKE:
p-value: 0.5015
Fail to reject null - SBUX and NKE are not cointegrated
No description has been provided for this image
ADF Test for AMZN (Differenced) (Original):
ADF Statistic: -4.5585
p-value: 0.0002
Reject null hypothesis - AMZN (Differenced) (Original) is stationary

KPSS Test for AMZN (Differenced) (Original):
KPSS Statistic: 0.7276
p-value: 0.0110
Reject null hypothesis - AMZN (Differenced) (Original) is non-stationary
No description has been provided for this image
ADF Test for TSLA (Differenced) (Original):
ADF Statistic: -5.2308
p-value: 0.0000
Reject null hypothesis - TSLA (Differenced) (Original) is stationary

KPSS Test for TSLA (Differenced) (Original):
KPSS Statistic: 0.5176
p-value: 0.0377
Reject null hypothesis - TSLA (Differenced) (Original) is non-stationary
No description has been provided for this image
ADF Test for SBUX (Differenced) (Original):
ADF Statistic: -6.7711
p-value: 0.0000
Reject null hypothesis - SBUX (Differenced) (Original) is stationary

KPSS Test for SBUX (Differenced) (Original):
KPSS Statistic: 0.3557
p-value: 0.0963
Fail to reject null - SBUX (Differenced) (Original) may be stationary
No description has been provided for this image
ADF Test for NKE (Differenced) (Original):
ADF Statistic: -4.7783
p-value: 0.0001
Reject null hypothesis - NKE (Differenced) (Original) is stationary

KPSS Test for NKE (Differenced) (Original):
KPSS Statistic: 0.3675
p-value: 0.0911
Fail to reject null - NKE (Differenced) (Original) may be stationary

Best ARMA Model for AMZN (Differenced): Order=(0, 0, 0), AIC=31.51
Forecast (Differenced) for AMZN (first 5 periods): [-0.01155664 -0.01155664 -0.01155664 -0.01155664]
MSE for AMZN (ARMA): 0.51

Best ARIMA Model for AMZN: Order=(0, 1, 0), AIC=29.54
Forecast (ARIMA) for AMZN (first 5 periods): [195.415 195.415 195.415 195.415]
MSE for AMZN (ARIMA): 0.47
No description has been provided for this image
Best ARMA Model for TSLA (Differenced): Order=(0, 0, 0), AIC=78.21
Forecast (Differenced) for TSLA (first 5 periods): [0.04809849 0.04809849 0.04809849 0.04809849]
MSE for TSLA (ARMA): 0.35

Best ARIMA Model for TSLA: Order=(0, 1, 0), AIC=76.30
Forecast (ARIMA) for TSLA (first 5 periods): [258.925 258.925 258.925 258.925]
MSE for TSLA (ARIMA): 0.24
No description has been provided for this image
Best ARMA Model for SBUX (Differenced): Order=(0, 0, 0), AIC=-26.74
Forecast (Differenced) for SBUX (first 5 periods): [-0.00379824 -0.00379824 -0.00379824 -0.00379824]
MSE for SBUX (ARMA): 0.07

Best ARIMA Model for SBUX: Order=(0, 0, 2), AIC=-37.17
Forecast (ARIMA) for SBUX (first 5 periods): [104.11594952 104.08658419 104.04379154 104.04379154]
MSE for SBUX (ARIMA): 0.20
No description has been provided for this image
Best ARMA Model for NKE (Differenced): Order=(0, 0, 0), AIC=-49.32
Forecast (Differenced) for NKE (first 5 periods): [-0.01017745 -0.01017745 -0.01017745 -0.01017745]
MSE for NKE (ARMA): 0.00

Best ARIMA Model for NKE: Order=(0, 1, 0), AIC=-51.00
Forecast (ARIMA) for NKE (first 5 periods): [77.54 77.54 77.54 77.54]
MSE for NKE (ARIMA): 0.00
No description has been provided for this image
In [ ]:
# Importar bibliotecas necesarias
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller, kpss, coint
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")

# Importar la biblioteca para subir archivos en Google Colab
from google.colab import files

# Subir el archivo
uploaded = files.upload()

# Listar los archivos para verificar el nombre
!ls

# Especificar el nombre del archivo (ajusta según el nombre que veas en !ls)
excel_file = 'descargas bloomberg (9).xlsx'  # Ajustado al archivo más reciente

# Listar todas las hojas disponibles en el archivo
print("Hojas disponibles en el archivo:", pd.ExcelFile(excel_file).sheet_names)

# Nombres de las hojas y correspondencia con las acciones
sheets_3days = {
    'APPLE': 'AAPL',
    'NVIDIA': 'NVDA',
    'MSFT': 'MSFT',
    'F': 'F'
}

sheets_10days = {
    'APPLE 10 DÍAS': 'AAPL',
    'NVIDIA 10 DÍAS': 'NVDA',
    'MSFT 10 DÍAS': 'MSFT',
    'F 10 DÍAS': 'F'
}

# Leer y procesar cada hoja
df_3days_3min = None
df_10days_30min = None

# Procesar hojas de 3 días
for sheet_name, stock in sheets_3days.items():
    print(f"Procesando hoja (3 días): {sheet_name}")
    # Leer la hoja, usando la primera fila como encabezado
    df = pd.read_excel(excel_file, sheet_name=sheet_name, header=0)

    # Inspeccionar las primeras filas
    print(f"Primeras 5 filas de {sheet_name}:")
    print(df.head())

    # Verificar las columnas disponibles
    print(f"Columnas en {sheet_name}: {df.columns.tolist()}")

    # Crear DataFrame con fechas y precios
    try:
        df_temp = pd.DataFrame({
            'Date': pd.to_datetime(df['Fecha']),
            stock: df['Lst Trd/Lst Px']
        }).dropna()
    except ValueError as e:
        print(f"Error al parsear fechas en {sheet_name}: {e}")
        continue

    print(f"Datos de 3 días para {stock}:")
    print(df_temp.head())

    # Combinar los datos en un solo DataFrame con inner join
    if df_3days_3min is None:
        df_3days_3min = df_temp.set_index('Date')
    else:
        df_3days_3min = df_3days_3min.join(df_temp.set_index('Date'), how='inner')

# Procesar hojas de 10 días
for sheet_name, stock in sheets_10days.items():
    print(f"Procesando hoja (10 días): {sheet_name}")
    # Leer la hoja, usando la primera fila como encabezado
    df = pd.read_excel(excel_file, sheet_name=sheet_name, header=0)

    # Inspeccionar las primeras filas
    print(f"Primeras 5 filas de {sheet_name}:")
    print(df.head())

    # Verificar las columnas disponibles
    print(f"Columnas en {sheet_name}: {df.columns.tolist()}")

    # Crear DataFrame con fechas y precios
    try:
        df_temp = pd.DataFrame({
            'Date': pd.to_datetime(df['Fecha']),
            stock: df['Lst Trd/Lst Px']
        }).dropna()
    except ValueError as e:
        print(f"Error al parsear fechas en {sheet_name}: {e}")
        continue

    print(f"Datos de 10 días para {stock}:")
    print(df_temp.head())

    # Combinar los datos en un solo DataFrame con inner join
    if df_10days_30min is None:
        df_10days_30min = df_temp.set_index('Date')
    else:
        df_10days_30min = df_10days_30min.join(df_temp.set_index('Date'), how='inner')

# Restablecer el índice para usar 'Date' como columna nuevamente
if df_3days_3min is not None and df_10days_30min is not None:
    df_3days_3min = df_3days_3min.reset_index()
    df_10days_30min = df_10days_30min.reset_index()
else:
    print("Error: df_3days_3min o df_10days_30min son None. Revisa la estructura de los datos.")
    raise ValueError("No se pudieron procesar los datos.")

# Lista de acciones a analizar
stocks = ['AAPL', 'NVDA', 'MSFT', 'F']

# Crear diccionarios de series temporales para ambos rangos
series_dict_3days_3min = {}
for stock in stocks:
    series_dict_3days_3min[stock] = pd.Series(df_3days_3min[stock].values, index=df_3days_3min['Date'], name=stock)

series_dict_10days_30min = {}
for stock in stocks:
    series_dict_10days_30min[stock] = pd.Series(df_10days_30min[stock].values, index=df_10days_30min['Date'], name=stock)

# Función para realizar el análisis (STL, ADF/KPSS, Cointegración, ARIMA)
def analyze_time_series(series_dict, title_prefix):
    # 1. Graficar las series originales
    plt.figure(figsize=(14, 7))
    for name, series in series_dict.items():
        plt.plot(series, label=name)
    plt.title(f'{title_prefix} - Intraday Closing Prices')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.legend()
    plt.show()

    # 2. Descomposición STL
    for name, series in series_dict.items():
        # Verificar y limpiar NaN antes de STL
        series_clean = series.dropna()
        if len(series_clean) == 0:
            print(f"Error: No hay datos válidos para {name} ({title_prefix}) después de eliminar NaN")
            continue

        # Ajustar el periodo según la frecuencia (aproximado)
        period = 480 if "3 Days" in title_prefix else 48  # 480 para 3 días cada 3 minutos, 48 para 30 minutos
        try:
            stl = STL(series_clean, period=period)
            result = stl.fit()

            plt.figure(figsize=(10, 8))
            plt.subplot(411)
            plt.plot(series_clean, label='Original')
            plt.legend()
            plt.subplot(412)
            plt.plot(result.trend, label='Trend')
            plt.legend()
            plt.subplot(413)
            plt.plot(result.seasonal, label='Seasonal')
            plt.legend()
            plt.subplot(414)
            plt.plot(result.resid, label='Residual')
            plt.legend()
            plt.suptitle(f'STL Decomposition for {name} ({title_prefix})')
            plt.tight_layout()
            plt.show()

            seasonal_std = np.std(result.seasonal)
            series_std = np.std(series_clean)
            print(f"{name} ({title_prefix}): Seasonal std / Series std = {seasonal_std / series_std:.4f}")
        except Exception as e:
            print(f"Error in STL decomposition for {name} ({title_prefix}): {e}")

    # 3. Pruebas de raíz unitaria (ADF y KPSS)
    def adf_test(series, name):
        result = adfuller(series.dropna(), autolag='AIC')
        print(f'\nADF Test for {name} ({title_prefix}): p-value = {result[1]:.4f}')
        return result[1] <= 0.05

    def kpss_test(series, name):
        result = kpss(series.dropna(), regression='c')
        print(f'KPSS Test for {name} ({title_prefix}): p-value = {result[1]:.4f}')
        return result[1] <= 0.05

    for name, series in series_dict.items():
        is_stationary_adf = adf_test(series, name)
        is_stationary_kpss = kpss_test(series, name)
        print(f"{name} ({title_prefix}) Stationarity: ADF={is_stationary_adf}, KPSS={is_stationary_kpss}")

    # 4. Prueba de cointegración
    def cointegration_test(series1, series2, name1, name2):
        series1_clean = series1.dropna()
        series2_clean = series2.dropna()
        aligned_series1, aligned_series2 = series1_clean.align(series2_clean, join='inner')
        if len(aligned_series1) == 0 or len(aligned_series2) == 0:
            print(f"Error: No hay datos alineados para cointegración entre {name1} y {name2}")
            return False
        score, p_value, _ = coint(aligned_series1, aligned_series2)
        print(f'\nCointegration Test {name1} vs {name2} ({title_prefix}): p-value = {p_value:.4f}')
        return p_value < 0.05

    series_list = list(series_dict.items())
    for i in range(len(series_list)):
        for j in range(i + 1, len(series_list)):
            name1, series1 = series_list[i]
            name2, series2 = series_list[j]
            cointegration_test(series1, series2, name1, name2)

    # 5. Modelos ARIMA
    for name, series in series_dict.items():
        series_clean = series.dropna()
        if len(series_clean) == 0:
            print(f"Error: No hay datos válidos para ARIMA en {name} ({title_prefix})")
            continue
        train_size = int(len(series_clean) * 0.9)
        train, test = series_clean[:train_size], series_clean[train_size:]

        # Ajustar ARIMA
        best_aic = float('inf')
        best_order = None
        best_model = None
        for p in range(3):
            for d in range(2):
                for q in range(3):
                    try:
                        model = ARIMA(train, order=(p, d, q))
                        model_fit = model.fit()
                        if model_fit.aic < best_aic:
                            best_aic = model_fit.aic
                            best_order = (p, d, q)
                            best_model = model_fit
                    except:
                        continue

        print(f'\nBest ARIMA for {name} ({title_prefix}): Order={best_order}, AIC={best_aic:.2f}')
        forecast = best_model.forecast(steps=len(test))
        mse = mean_squared_error(test, forecast)
        print(f'MSE for {name} ({title_prefix}): {mse:.2f}')

        plt.figure(figsize=(10, 4))
        plt.plot(train.index, train, label='Train')
        plt.plot(test.index, test, label='Test')
        plt.plot(test.index, forecast, label='Forecast')
        plt.title(f'{name} with ARIMA Forecast ({title_prefix})')
        plt.legend()
        plt.show()

# Analizar ambos rangos de datos
if df_3days_3min is not None and df_10days_30min is not None:
    print("=== Analysis for Last 3 Days (Every 3 Minutes) ===")
    analyze_time_series(series_dict_3days_3min, "Last 3 Days (3 min)")

    print("\n=== Analysis for Last 10 Days (Every 30 Minutes) ===")
    analyze_time_series(series_dict_10days_30min, "Last 10 Days (30 min)")
else:
    print("Error: df_3days_3min o df_10days_30min son None. Revisa la estructura de los datos.")
    raise ValueError("No se pudieron procesar los datos.")
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving descargas bloomberg.xlsx to descargas bloomberg (10).xlsx
'descargas bloomberg (10).xlsx'  'descargas bloomberg (4).xlsx'  'descargas bloomberg (8).xlsx'
'descargas bloomberg (1).xlsx'	 'descargas bloomberg (5).xlsx'  'descargas bloomberg (9).xlsx'
'descargas bloomberg (2).xlsx'	 'descargas bloomberg (6).xlsx'  'descargas bloomberg.xlsx'
'descargas bloomberg (3).xlsx'	 'descargas bloomberg (7).xlsx'   sample_data
Hojas disponibles en el archivo: ['AMAZON', 'APPLE', 'APPLE 10 DÍAS', 'NVIDIA', 'NVIDIA 10 DÍAS', 'MSFT', 'MSFT 10 DÍAS', 'F', 'F 10 DÍAS', 'TSLA', 'SBUX', 'NKE']
Procesando hoja (3 días): APPLE
Primeras 5 filas de APPLE:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:21:00          239.03  110.946k    241.44k
1 2025-03-07 11:18:00          238.89  183.729k   250.773k
2 2025-03-07 11:15:00          238.62  115.774k   257.897k
3 2025-03-07 11:12:00          238.70  167.875k   269.943k
4 2025-03-07 11:09:00          238.11  245.826k   275.014k
Columnas en APPLE: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para AAPL:
                 Date    AAPL
0 2025-03-07 11:21:00  239.03
1 2025-03-07 11:18:00  238.89
2 2025-03-07 11:15:00  238.62
3 2025-03-07 11:12:00  238.70
4 2025-03-07 11:09:00  238.11
Procesando hoja (3 días): NVIDIA
Primeras 5 filas de NVIDIA:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:24:00        108.8100  884.24k     2.067M
1 2025-03-07 11:21:00        108.6499   1.686M      2.21M
2 2025-03-07 11:18:00        108.2400   1.361M     2.353M
3 2025-03-07 11:15:00        107.9900   1.241M     2.362M
4 2025-03-07 11:12:00        108.2450    1.66M     2.377M
Columnas en NVIDIA: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para NVDA:
                 Date      NVDA
0 2025-03-07 11:24:00  108.8100
1 2025-03-07 11:21:00  108.6499
2 2025-03-07 11:18:00  108.2400
3 2025-03-07 11:15:00  107.9900
4 2025-03-07 11:12:00  108.2450
Procesando hoja (3 días): MSFT
Primeras 5 filas de MSFT:
                Fecha  Lst Trd/Lst Px Volume SMAVG (15)
0 2025-03-07 11:30:00         387.750   8352      66633
1 2025-03-07 11:27:00         387.340  56145      72417
2 2025-03-07 11:24:00         387.200  45427      75017
3 2025-03-07 11:21:00         386.635  81545      79070
4 2025-03-07 11:18:00         386.050  47115      79166
Columnas en MSFT: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para MSFT:
                 Date     MSFT
0 2025-03-07 11:30:00  387.750
1 2025-03-07 11:27:00  387.340
2 2025-03-07 11:24:00  387.200
3 2025-03-07 11:21:00  386.635
4 2025-03-07 11:18:00  386.050
Procesando hoja (3 días): F
Primeras 5 filas de F:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:36:00          9.8001  118.124k   779.909k
1 2025-03-07 11:33:00          9.7999  667.432k   807.469k
2 2025-03-07 11:30:00          9.7800  601.267k   801.849k
3 2025-03-07 11:27:00          9.7600  688.325k    841.22k
4 2025-03-07 11:24:00          9.7350  448.437k   863.239k
Columnas en F: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para F:
                 Date       F
0 2025-03-07 11:36:00  9.8001
1 2025-03-07 11:33:00  9.7999
2 2025-03-07 11:30:00  9.7800
3 2025-03-07 11:27:00  9.7600
4 2025-03-07 11:24:00  9.7350
Procesando hoja (10 días): APPLE 10 DÍAS
Primeras 5 filas de APPLE 10 DÍAS:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:30:00         239.385  413.564k     2.586M
1 2025-03-07 11:00:00         239.160    2.066M     2.658M
2 2025-03-07 10:30:00         239.235    2.825M     2.621M
3 2025-03-07 10:00:00         239.095    2.202M     2.548M
4 2025-03-07 09:30:00         238.930    2.842M     2.558M
Columnas en APPLE 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para AAPL:
                 Date     AAPL
0 2025-03-07 11:30:00  239.385
1 2025-03-07 11:00:00  239.160
2 2025-03-07 10:30:00  239.235
3 2025-03-07 10:00:00  239.095
4 2025-03-07 09:30:00  238.930
Procesando hoja (10 días): NVIDIA 10 DÍAS
Primeras 5 filas de NVIDIA 10 DÍAS:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:30:00        109.5850   2.224M    19.998M
1 2025-03-07 11:00:00        108.8400  16.316M    20.847M
2 2025-03-07 10:30:00        107.9299  26.742M    20.593M
3 2025-03-07 10:00:00        109.7600  15.596M     19.69M
4 2025-03-07 09:30:00        111.1500  29.536M     19.99M
Columnas en NVIDIA 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para NVDA:
                 Date      NVDA
0 2025-03-07 11:30:00  109.5850
1 2025-03-07 11:00:00  108.8400
2 2025-03-07 10:30:00  107.9299
3 2025-03-07 10:00:00  109.7600
4 2025-03-07 09:30:00  111.1500
Procesando hoja (10 días): MSFT 10 DÍAS
Primeras 5 filas de MSFT 10 DÍAS:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:30:00        387.3100     32107     1.041M
1 2025-03-07 11:00:00        387.3400  661.803k     1.078M
2 2025-03-07 10:30:00        386.7075   882.52k     1.082M
3 2025-03-07 10:00:00        388.1000  645.911k     1.071M
4 2025-03-07 09:30:00        390.7500    1.044M     1.108M
Columnas en MSFT 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para MSFT:
                 Date      MSFT
0 2025-03-07 11:30:00  387.3100
1 2025-03-07 11:00:00  387.3400
2 2025-03-07 10:30:00  386.7075
3 2025-03-07 10:00:00  388.1000
4 2025-03-07 09:30:00  390.7500
Procesando hoja (10 días): F 10 DÍAS
Primeras 5 filas de F 10 DÍAS:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:30:00          9.8100   1.609M     9.993M
1 2025-03-07 11:00:00          9.7600   8.258M    10.761M
2 2025-03-07 10:30:00          9.7150  11.066M    10.766M
3 2025-03-07 10:00:00          9.7943   9.683M    10.721M
4 2025-03-07 09:30:00          9.8350  21.069M     10.89M
Columnas en F 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para F:
                 Date       F
0 2025-03-07 11:30:00  9.8100
1 2025-03-07 11:00:00  9.7600
2 2025-03-07 10:30:00  9.7150
3 2025-03-07 10:00:00  9.7943
4 2025-03-07 09:30:00  9.8350
=== Analysis for Last 3 Days (Every 3 Minutes) ===
No description has been provided for this image
No description has been provided for this image
AAPL (Last 3 Days (3 min)): Seasonal std / Series std = 1.0000
No description has been provided for this image
NVDA (Last 3 Days (3 min)): Seasonal std / Series std = 1.0000
No description has been provided for this image
MSFT (Last 3 Days (3 min)): Seasonal std / Series std = 1.0000
No description has been provided for this image
F (Last 3 Days (3 min)): Seasonal std / Series std = 1.0000

ADF Test for AAPL (Last 3 Days (3 min)): p-value = 0.4023
KPSS Test for AAPL (Last 3 Days (3 min)): p-value = 0.0593
AAPL (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=False

ADF Test for NVDA (Last 3 Days (3 min)): p-value = 0.2327
KPSS Test for NVDA (Last 3 Days (3 min)): p-value = 0.0100
NVDA (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

ADF Test for MSFT (Last 3 Days (3 min)): p-value = 0.8522
KPSS Test for MSFT (Last 3 Days (3 min)): p-value = 0.0100
MSFT (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

ADF Test for F (Last 3 Days (3 min)): p-value = 0.8124
KPSS Test for F (Last 3 Days (3 min)): p-value = 0.0100
F (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

Cointegration Test AAPL vs NVDA (Last 3 Days (3 min)): p-value = 0.5942

Cointegration Test AAPL vs MSFT (Last 3 Days (3 min)): p-value = 0.3644

Cointegration Test AAPL vs F (Last 3 Days (3 min)): p-value = 0.5458

Cointegration Test NVDA vs MSFT (Last 3 Days (3 min)): p-value = 0.5100

Cointegration Test NVDA vs F (Last 3 Days (3 min)): p-value = 0.5173

Cointegration Test MSFT vs F (Last 3 Days (3 min)): p-value = 0.2781

Best ARIMA for AAPL (Last 3 Days (3 min)): Order=(0, 1, 0), AIC=367.55
MSE for AAPL (Last 3 Days (3 min)): 0.20
No description has been provided for this image
Best ARIMA for NVDA (Last 3 Days (3 min)): Order=(2, 1, 0), AIC=445.27
MSE for NVDA (Last 3 Days (3 min)): 12.09
No description has been provided for this image
Best ARIMA for MSFT (Last 3 Days (3 min)): Order=(2, 1, 2), AIC=750.94
MSE for MSFT (Last 3 Days (3 min)): 1.40
No description has been provided for this image
Best ARIMA for F (Last 3 Days (3 min)): Order=(0, 1, 0), AIC=-1830.40
MSE for F (Last 3 Days (3 min)): 0.00
No description has been provided for this image
=== Analysis for Last 10 Days (Every 30 Minutes) ===
No description has been provided for this image
No description has been provided for this image
AAPL (Last 10 Days (30 min)): Seasonal std / Series std = 0.4350
No description has been provided for this image
NVDA (Last 10 Days (30 min)): Seasonal std / Series std = 0.2562
No description has been provided for this image
MSFT (Last 10 Days (30 min)): Seasonal std / Series std = 0.5260
No description has been provided for this image
F (Last 10 Days (30 min)): Seasonal std / Series std = 0.9377

ADF Test for AAPL (Last 10 Days (30 min)): p-value = 0.6997
KPSS Test for AAPL (Last 10 Days (30 min)): p-value = 0.0100
AAPL (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for NVDA (Last 10 Days (30 min)): p-value = 0.8898
KPSS Test for NVDA (Last 10 Days (30 min)): p-value = 0.0100
NVDA (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for MSFT (Last 10 Days (30 min)): p-value = 0.8289
KPSS Test for MSFT (Last 10 Days (30 min)): p-value = 0.0100
MSFT (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for F (Last 10 Days (30 min)): p-value = 0.0498
KPSS Test for F (Last 10 Days (30 min)): p-value = 0.0836
F (Last 10 Days (30 min)) Stationarity: ADF=True, KPSS=False

Cointegration Test AAPL vs NVDA (Last 10 Days (30 min)): p-value = 0.3201

Cointegration Test AAPL vs MSFT (Last 10 Days (30 min)): p-value = 0.5892

Cointegration Test AAPL vs F (Last 10 Days (30 min)): p-value = 0.7830

Cointegration Test NVDA vs MSFT (Last 10 Days (30 min)): p-value = 0.6511

Cointegration Test NVDA vs F (Last 10 Days (30 min)): p-value = 0.9000

Cointegration Test MSFT vs F (Last 10 Days (30 min)): p-value = 0.9290

Best ARIMA for AAPL (Last 10 Days (30 min)): Order=(0, 1, 0), AIC=371.16
MSE for AAPL (Last 10 Days (30 min)): 0.97
No description has been provided for this image
Best ARIMA for NVDA (Last 10 Days (30 min)): Order=(1, 1, 0), AIC=442.47
MSE for NVDA (Last 10 Days (30 min)): 25.88
No description has been provided for this image
Best ARIMA for MSFT (Last 10 Days (30 min)): Order=(0, 1, 0), AIC=457.43
MSE for MSFT (Last 10 Days (30 min)): 63.01
No description has been provided for this image
Best ARIMA for F (Last 10 Days (30 min)): Order=(2, 0, 1), AIC=-306.16
MSE for F (Last 10 Days (30 min)): 0.01
No description has been provided for this image
In [ ]:
# Importar bibliotecas necesarias
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller, kpss, coint
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")

# Importar la biblioteca para subir archivos en Google Colab
from google.colab import files

# Subir el archivo
uploaded = files.upload()

# Listar los archivos para verificar el nombre
!ls

# Especificar el nombre del archivo (ajusta según el nombre que veas en !ls)
excel_file = 'descargas bloomberg.xlsx'  # Ajustado al nombre del archivo subido

# Listar todas las hojas disponibles en el archivo
print("Hojas disponibles en el archivo:", pd.ExcelFile(excel_file).sheet_names)

# Nombres de las hojas y correspondencia con las acciones
sheets_3days = {
    'APPLE': 'AAPL',
    'NVIDIA': 'NVDA',
    'MSFT': 'MSFT',
    'F': 'F'
}

sheets_10days = {
    'APPLE 10 DÍAS': 'AAPL',
    'NVIDIA 10 DÍAS': 'NVDA',
    'MSFT 10 DÍAS': 'MSFT',
    'F 10 DÍAS': 'F'
}

# Leer y procesar cada hoja
df_3days_3min = None
df_10days_30min = None

# Procesar hojas de 3 días
for sheet_name, stock in sheets_3days.items():
    print(f"Procesando hoja (3 días): {sheet_name}")
    df = pd.read_excel(excel_file, sheet_name=sheet_name, header=0)
    print(f"Primeras 5 filas de {sheet_name}:")
    print(df.head())
    print(f"Columnas en {sheet_name}: {df.columns.tolist()}")
    try:
        df_temp = pd.DataFrame({
            'Date': pd.to_datetime(df['Fecha']),
            stock: df['Lst Trd/Lst Px']
        }).dropna()
    except ValueError as e:
        print(f"Error al parsear fechas en {sheet_name}: {e}")
        continue
    print(f"Datos de 3 días para {stock}:")
    print(df_temp.head())
    if df_3days_3min is None:
        df_3days_3min = df_temp.set_index('Date')
    else:
        df_3days_3min = df_3days_3min.join(df_temp.set_index('Date'), how='inner')

# Procesar hojas de 10 días
for sheet_name, stock in sheets_10days.items():
    print(f"Procesando hoja (10 días): {sheet_name}")
    df = pd.read_excel(excel_file, sheet_name=sheet_name, header=0)
    print(f"Primeras 5 filas de {sheet_name}:")
    print(df.head())
    print(f"Columnas en {sheet_name}: {df.columns.tolist()}")
    try:
        df_temp = pd.DataFrame({
            'Date': pd.to_datetime(df['Fecha']),
            stock: df['Lst Trd/Lst Px']
        }).dropna()
    except ValueError as e:
        print(f"Error al parsear fechas en {sheet_name}: {e}")
        continue
    print(f"Datos de 10 días para {stock}:")
    print(df_temp.head())
    if df_10days_30min is None:
        df_10days_30min = df_temp.set_index('Date')
    else:
        df_10days_30min = df_10days_30min.join(df_temp.set_index('Date'), how='inner')

# Restablecer el índice para usar 'Date' como columna nuevamente
if df_3days_3min is not None and df_10days_30min is not None:
    df_3days_3min = df_3days_3min.reset_index()
    df_10days_30min = df_10days_30min.reset_index()
else:
    print("Error: df_3days_3min o df_10days_30min son None. Revisa la estructura de los datos.")
    raise ValueError("No se pudieron procesar los datos.")

# Lista de acciones a analizar
stocks = ['AAPL', 'NVDA', 'MSFT', 'F']

# Crear diccionarios de series temporales para ambos rangos
series_dict_3days_3min = {}
for stock in stocks:
    series_dict_3days_3min[stock] = pd.Series(df_3days_3min[stock].values, index=df_3days_3min['Date'], name=stock)

series_dict_10days_30min = {}
for stock in stocks:
    series_dict_10days_30min[stock] = pd.Series(df_10days_30min[stock].values, index=df_10days_30min['Date'], name=stock)

# Estructura para almacenar resultados
results_3days = {'STL': {}, 'ADF': {}, 'KPSS': {}, 'Cointegration': [], 'ARIMA': {}}
results_10days = {'STL': {}, 'ADF': {}, 'KPSS': {}, 'Cointegration': [], 'ARIMA': {}}

# Función para realizar el análisis (STL, ADF/KPSS, Cointegración, ARIMA)
def analyze_time_series(series_dict, title_prefix, results_dict):
    # 1. Graficar las series originales
    plt.figure(figsize=(14, 7))
    for name, series in series_dict.items():
        plt.plot(series, label=name)
    plt.title(f'{title_prefix} - Intraday Closing Prices')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.legend()
    plt.show()

    # 2. Descomposición STL
    for name, series in series_dict.items():
        series_clean = series.dropna()
        if len(series_clean) == 0:
            print(f"Error: No hay datos válidos para {name} ({title_prefix}) después de eliminar NaN")
            continue
        period = 48 if "3 Days" in title_prefix else 24  # Ajuste del período
        try:
            stl = STL(series_clean, period=period)
            result = stl.fit()
            plt.figure(figsize=(10, 8))
            plt.subplot(411)
            plt.plot(series_clean, label='Original')
            plt.legend()
            plt.subplot(412)
            plt.plot(result.trend, label='Trend')
            plt.legend()
            plt.subplot(413)
            plt.plot(result.seasonal, label='Seasonal')
            plt.legend()
            plt.subplot(414)
            plt.plot(result.resid, label='Residual')
            plt.legend()
            plt.suptitle(f'STL Decomposition for {name} ({title_prefix})')
            plt.tight_layout()
            plt.show()
            seasonal_std = np.std(result.seasonal)
            series_std = np.std(series_clean)
            ratio = seasonal_std / series_std
            print(f"{name} ({title_prefix}): Seasonal std / Series std = {ratio:.4f}")
            results_dict['STL'][name] = ratio
        except Exception as e:
            print(f"Error in STL decomposition for {name} ({title_prefix}): {e}")

    # 3. Pruebas de raíz unitaria (ADF y KPSS)
    def adf_test(series, name):
        result = adfuller(series.dropna(), autolag='AIC')
        p_value = result[1]
        print(f'\nADF Test for {name} ({title_prefix}): p-value = {p_value:.4f}')
        results_dict['ADF'][name] = p_value
        return p_value <= 0.05

    def kpss_test(series, name):
        result = kpss(series.dropna(), regression='c')
        p_value = result[1]
        print(f'KPSS Test for {name} ({title_prefix}): p-value = {p_value:.4f}')
        results_dict['KPSS'][name] = p_value
        return p_value <= 0.05

    for name, series in series_dict.items():
        is_stationary_adf = adf_test(series, name)
        is_stationary_kpss = kpss_test(series, name)
        print(f"{name} ({title_prefix}) Stationarity: ADF={is_stationary_adf}, KPSS={is_stationary_kpss}")

    # 4. Prueba de cointegración
    def cointegration_test(series1, series2, name1, name2):
        series1_clean = series1.dropna()
        series2_clean = series2.dropna()
        aligned_series1, aligned_series2 = series1_clean.align(series2_clean, join='inner')
        if len(aligned_series1) == 0 or len(aligned_series2) == 0:
            print(f"Error: No hay datos alineados para cointegración entre {name1} y {name2}")
            return False
        score, p_value, _ = coint(aligned_series1, aligned_series2)
        print(f'\nCointegration Test {name1} vs {name2} ({title_prefix}): p-value = {p_value:.4f}')
        results_dict['Cointegration'].append((name1, name2, p_value))
        return p_value < 0.05

    series_list = list(series_dict.items())
    for i in range(len(series_list)):
        for j in range(i + 1, len(series_list)):
            name1, series1 = series_list[i]
            name2, series2 = series_list[j]
            cointegration_test(series1, series2, name1, name2)

    # 5. Modelos ARIMA
    for name, series in series_dict.items():
        series_clean = series.dropna()
        if len(series_clean) == 0:
            print(f"Error: No hay datos válidos para ARIMA en {name} ({title_prefix})")
            continue
        train_size = int(len(series_clean) * 0.9)
        train, test = series_clean[:train_size], series_clean[train_size:]
        best_aic = float('inf')
        best_order = None
        best_model = None
        for p in range(5):  # Ampliar rango
            for d in range(2):
                for q in range(5):
                    try:
                        model = ARIMA(train, order=(p, d, q))
                        model_fit = model.fit()
                        if model_fit.aic < best_aic:
                            best_aic = model_fit.aic
                            best_order = (p, d, q)
                            best_model = model_fit
                    except:
                        continue
        print(f'\nBest ARIMA for {name} ({title_prefix}): Order={best_order}, AIC={best_aic:.2f}')
        forecast = best_model.forecast(steps=len(test))
        mse = mean_squared_error(test, forecast)
        print(f'MSE for {name} ({title_prefix}): {mse:.2f}')
        plt.figure(figsize=(10, 4))
        plt.plot(train.index, train, label='Train')
        plt.plot(test.index, test, label='Test')
        plt.plot(test.index, forecast, label='Forecast')
        plt.title(f'{name} with ARIMA Forecast ({title_prefix})')
        plt.legend()
        plt.show()
        results_dict['ARIMA'][name] = (best_order, best_aic, mse)

# Analizar ambos rangos de datos
if df_3days_3min is not None and df_10days_30min is not None:
    print("=== Analysis for Last 3 Days (Every 3 Minutes) ===")
    analyze_time_series(series_dict_3days_3min, "Last 3 Days (3 min)", results_3days)

    print("\n=== Analysis for Last 10 Days (Every 30 Minutes) ===")
    analyze_time_series(series_dict_10days_30min, "Last 10 Days (30 min)", results_10days)
else:
    print("Error: df_3days_3min o df_10days_30min son None. Revisa la estructura de los datos.")
    raise ValueError("No se pudieron procesar los datos.")

# Resumen de resultados
def print_summary(results, title):
    print(f"\n=== Resumen de Resultados para {title} ===")
    print("\n**STL Decomposition (Seasonal std / Series std):**")
    for stock, ratio in results['STL'].items():
        print(f"{stock}: {ratio:.4f}")
    print("\n**ADF Test (p-values):**")
    for stock, p_value in results['ADF'].items():
        print(f"{stock}: {p_value:.4f} {'(Estacionaria)' if p_value <= 0.05 else '(No Estacionaria)'}")
    print("\n**KPSS Test (p-values):**")
    for stock, p_value in results['KPSS'].items():
        print(f"{stock}: {p_value:.4f} {'(Estacionaria)' if p_value > 0.05 else '(No Estacionaria)'}")
    print("\n**Cointegration Tests (p-values):**")
    for name1, name2, p_value in results['Cointegration']:
        print(f"{name1} vs {name2}: {p_value:.4f} {'(Cointegradas)' if p_value < 0.05 else '(No Cointegradas)'}")
    print("\n**ARIMA Models:**")
    for stock, (order, aic, mse) in results['ARIMA'].items():
        print(f"{stock}: Order={order}, AIC={aic:.2f}, MSE={mse:.2f}")

print_summary(results_3days, "Last 3 Days (Every 3 Minutes)")
print_summary(results_10days, "Last 10 Days (Every 30 Minutes)")
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving descargas bloomberg.xlsx to descargas bloomberg (1).xlsx
'descargas bloomberg (1).xlsx'	'descargas bloomberg.xlsx'   sample_data
Hojas disponibles en el archivo: ['AMAZON', 'APPLE', 'APPLE 10 DÍAS', 'NVIDIA', 'NVIDIA 10 DÍAS', 'MSFT', 'MSFT 10 DÍAS', 'F', 'F 10 DÍAS', 'TSLA', 'SBUX', 'NKE']
Procesando hoja (3 días): APPLE
Primeras 5 filas de APPLE:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:21:00          239.03  110.946k    241.44k
1 2025-03-07 11:18:00          238.89  183.729k   250.773k
2 2025-03-07 11:15:00          238.62  115.774k   257.897k
3 2025-03-07 11:12:00          238.70  167.875k   269.943k
4 2025-03-07 11:09:00          238.11  245.826k   275.014k
Columnas en APPLE: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para AAPL:
                 Date    AAPL
0 2025-03-07 11:21:00  239.03
1 2025-03-07 11:18:00  238.89
2 2025-03-07 11:15:00  238.62
3 2025-03-07 11:12:00  238.70
4 2025-03-07 11:09:00  238.11
Procesando hoja (3 días): NVIDIA
Primeras 5 filas de NVIDIA:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:24:00        108.8100  884.24k     2.067M
1 2025-03-07 11:21:00        108.6499   1.686M      2.21M
2 2025-03-07 11:18:00        108.2400   1.361M     2.353M
3 2025-03-07 11:15:00        107.9900   1.241M     2.362M
4 2025-03-07 11:12:00        108.2450    1.66M     2.377M
Columnas en NVIDIA: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para NVDA:
                 Date      NVDA
0 2025-03-07 11:24:00  108.8100
1 2025-03-07 11:21:00  108.6499
2 2025-03-07 11:18:00  108.2400
3 2025-03-07 11:15:00  107.9900
4 2025-03-07 11:12:00  108.2450
Procesando hoja (3 días): MSFT
Primeras 5 filas de MSFT:
                Fecha  Lst Trd/Lst Px Volume SMAVG (15)
0 2025-03-07 11:30:00         387.750   8352      66633
1 2025-03-07 11:27:00         387.340  56145      72417
2 2025-03-07 11:24:00         387.200  45427      75017
3 2025-03-07 11:21:00         386.635  81545      79070
4 2025-03-07 11:18:00         386.050  47115      79166
Columnas en MSFT: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para MSFT:
                 Date     MSFT
0 2025-03-07 11:30:00  387.750
1 2025-03-07 11:27:00  387.340
2 2025-03-07 11:24:00  387.200
3 2025-03-07 11:21:00  386.635
4 2025-03-07 11:18:00  386.050
Procesando hoja (3 días): F
Primeras 5 filas de F:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:36:00          9.8001  118.124k   779.909k
1 2025-03-07 11:33:00          9.7999  667.432k   807.469k
2 2025-03-07 11:30:00          9.7800  601.267k   801.849k
3 2025-03-07 11:27:00          9.7600  688.325k    841.22k
4 2025-03-07 11:24:00          9.7350  448.437k   863.239k
Columnas en F: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 3 días para F:
                 Date       F
0 2025-03-07 11:36:00  9.8001
1 2025-03-07 11:33:00  9.7999
2 2025-03-07 11:30:00  9.7800
3 2025-03-07 11:27:00  9.7600
4 2025-03-07 11:24:00  9.7350
Procesando hoja (10 días): APPLE 10 DÍAS
Primeras 5 filas de APPLE 10 DÍAS:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:30:00         239.385  413.564k     2.586M
1 2025-03-07 11:00:00         239.160    2.066M     2.658M
2 2025-03-07 10:30:00         239.235    2.825M     2.621M
3 2025-03-07 10:00:00         239.095    2.202M     2.548M
4 2025-03-07 09:30:00         238.930    2.842M     2.558M
Columnas en APPLE 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para AAPL:
                 Date     AAPL
0 2025-03-07 11:30:00  239.385
1 2025-03-07 11:00:00  239.160
2 2025-03-07 10:30:00  239.235
3 2025-03-07 10:00:00  239.095
4 2025-03-07 09:30:00  238.930
Procesando hoja (10 días): NVIDIA 10 DÍAS
Primeras 5 filas de NVIDIA 10 DÍAS:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:30:00        109.5850   2.224M    19.998M
1 2025-03-07 11:00:00        108.8400  16.316M    20.847M
2 2025-03-07 10:30:00        107.9299  26.742M    20.593M
3 2025-03-07 10:00:00        109.7600  15.596M     19.69M
4 2025-03-07 09:30:00        111.1500  29.536M     19.99M
Columnas en NVIDIA 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para NVDA:
                 Date      NVDA
0 2025-03-07 11:30:00  109.5850
1 2025-03-07 11:00:00  108.8400
2 2025-03-07 10:30:00  107.9299
3 2025-03-07 10:00:00  109.7600
4 2025-03-07 09:30:00  111.1500
Procesando hoja (10 días): MSFT 10 DÍAS
Primeras 5 filas de MSFT 10 DÍAS:
                Fecha  Lst Trd/Lst Px    Volume SMAVG (15)
0 2025-03-07 11:30:00        387.3100     32107     1.041M
1 2025-03-07 11:00:00        387.3400  661.803k     1.078M
2 2025-03-07 10:30:00        386.7075   882.52k     1.082M
3 2025-03-07 10:00:00        388.1000  645.911k     1.071M
4 2025-03-07 09:30:00        390.7500    1.044M     1.108M
Columnas en MSFT 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para MSFT:
                 Date      MSFT
0 2025-03-07 11:30:00  387.3100
1 2025-03-07 11:00:00  387.3400
2 2025-03-07 10:30:00  386.7075
3 2025-03-07 10:00:00  388.1000
4 2025-03-07 09:30:00  390.7500
Procesando hoja (10 días): F 10 DÍAS
Primeras 5 filas de F 10 DÍAS:
                Fecha  Lst Trd/Lst Px   Volume SMAVG (15)
0 2025-03-07 11:30:00          9.8100   1.609M     9.993M
1 2025-03-07 11:00:00          9.7600   8.258M    10.761M
2 2025-03-07 10:30:00          9.7150  11.066M    10.766M
3 2025-03-07 10:00:00          9.7943   9.683M    10.721M
4 2025-03-07 09:30:00          9.8350  21.069M     10.89M
Columnas en F 10 DÍAS: ['Fecha', 'Lst Trd/Lst Px', 'Volume', 'SMAVG (15)']
Datos de 10 días para F:
                 Date       F
0 2025-03-07 11:30:00  9.8100
1 2025-03-07 11:00:00  9.7600
2 2025-03-07 10:30:00  9.7150
3 2025-03-07 10:00:00  9.7943
4 2025-03-07 09:30:00  9.8350
=== Analysis for Last 3 Days (Every 3 Minutes) ===
No description has been provided for this image
No description has been provided for this image
AAPL (Last 3 Days (3 min)): Seasonal std / Series std = 0.1577
No description has been provided for this image
NVDA (Last 3 Days (3 min)): Seasonal std / Series std = 0.1703
No description has been provided for this image
MSFT (Last 3 Days (3 min)): Seasonal std / Series std = 0.1451
No description has been provided for this image
F (Last 3 Days (3 min)): Seasonal std / Series std = 0.1499

ADF Test for AAPL (Last 3 Days (3 min)): p-value = 0.4023
KPSS Test for AAPL (Last 3 Days (3 min)): p-value = 0.0593
AAPL (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=False

ADF Test for NVDA (Last 3 Days (3 min)): p-value = 0.2327
KPSS Test for NVDA (Last 3 Days (3 min)): p-value = 0.0100
NVDA (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

ADF Test for MSFT (Last 3 Days (3 min)): p-value = 0.8522
KPSS Test for MSFT (Last 3 Days (3 min)): p-value = 0.0100
MSFT (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

ADF Test for F (Last 3 Days (3 min)): p-value = 0.8124
KPSS Test for F (Last 3 Days (3 min)): p-value = 0.0100
F (Last 3 Days (3 min)) Stationarity: ADF=False, KPSS=True

Cointegration Test AAPL vs NVDA (Last 3 Days (3 min)): p-value = 0.5942

Cointegration Test AAPL vs MSFT (Last 3 Days (3 min)): p-value = 0.3644

Cointegration Test AAPL vs F (Last 3 Days (3 min)): p-value = 0.5458

Cointegration Test NVDA vs MSFT (Last 3 Days (3 min)): p-value = 0.5100

Cointegration Test NVDA vs F (Last 3 Days (3 min)): p-value = 0.5173

Cointegration Test MSFT vs F (Last 3 Days (3 min)): p-value = 0.2781

Best ARIMA for AAPL (Last 3 Days (3 min)): Order=(0, 1, 0), AIC=367.55
MSE for AAPL (Last 3 Days (3 min)): 0.20
No description has been provided for this image
Best ARIMA for NVDA (Last 3 Days (3 min)): Order=(2, 1, 0), AIC=445.27
MSE for NVDA (Last 3 Days (3 min)): 12.09
No description has been provided for this image
Best ARIMA for MSFT (Last 3 Days (3 min)): Order=(2, 1, 2), AIC=750.94
MSE for MSFT (Last 3 Days (3 min)): 1.40
No description has been provided for this image
Best ARIMA for F (Last 3 Days (3 min)): Order=(0, 1, 0), AIC=-1830.40
MSE for F (Last 3 Days (3 min)): 0.00
No description has been provided for this image
=== Analysis for Last 10 Days (Every 30 Minutes) ===
No description has been provided for this image
No description has been provided for this image
AAPL (Last 10 Days (30 min)): Seasonal std / Series std = 0.1933
No description has been provided for this image
NVDA (Last 10 Days (30 min)): Seasonal std / Series std = 0.1148
No description has been provided for this image
MSFT (Last 10 Days (30 min)): Seasonal std / Series std = 0.2096
No description has been provided for this image
F (Last 10 Days (30 min)): Seasonal std / Series std = 0.3838

ADF Test for AAPL (Last 10 Days (30 min)): p-value = 0.6997
KPSS Test for AAPL (Last 10 Days (30 min)): p-value = 0.0100
AAPL (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for NVDA (Last 10 Days (30 min)): p-value = 0.8898
KPSS Test for NVDA (Last 10 Days (30 min)): p-value = 0.0100
NVDA (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for MSFT (Last 10 Days (30 min)): p-value = 0.8289
KPSS Test for MSFT (Last 10 Days (30 min)): p-value = 0.0100
MSFT (Last 10 Days (30 min)) Stationarity: ADF=False, KPSS=True

ADF Test for F (Last 10 Days (30 min)): p-value = 0.0498
KPSS Test for F (Last 10 Days (30 min)): p-value = 0.0836
F (Last 10 Days (30 min)) Stationarity: ADF=True, KPSS=False

Cointegration Test AAPL vs NVDA (Last 10 Days (30 min)): p-value = 0.3201

Cointegration Test AAPL vs MSFT (Last 10 Days (30 min)): p-value = 0.5892

Cointegration Test AAPL vs F (Last 10 Days (30 min)): p-value = 0.7830

Cointegration Test NVDA vs MSFT (Last 10 Days (30 min)): p-value = 0.6511

Cointegration Test NVDA vs F (Last 10 Days (30 min)): p-value = 0.9000

Cointegration Test MSFT vs F (Last 10 Days (30 min)): p-value = 0.9290

Best ARIMA for AAPL (Last 10 Days (30 min)): Order=(0, 1, 0), AIC=371.16
MSE for AAPL (Last 10 Days (30 min)): 0.97
No description has been provided for this image
Best ARIMA for NVDA (Last 10 Days (30 min)): Order=(1, 1, 0), AIC=442.47
MSE for NVDA (Last 10 Days (30 min)): 25.88
No description has been provided for this image
Best ARIMA for MSFT (Last 10 Days (30 min)): Order=(0, 1, 0), AIC=457.43
MSE for MSFT (Last 10 Days (30 min)): 63.01
No description has been provided for this image
Best ARIMA for F (Last 10 Days (30 min)): Order=(3, 0, 2), AIC=-306.38
MSE for F (Last 10 Days (30 min)): 0.03
No description has been provided for this image
=== Resumen de Resultados para Last 3 Days (Every 3 Minutes) ===

**STL Decomposition (Seasonal std / Series std):**
AAPL: 0.1577
NVDA: 0.1703
MSFT: 0.1451
F: 0.1499

**ADF Test (p-values):**
AAPL: 0.4023 (No Estacionaria)
NVDA: 0.2327 (No Estacionaria)
MSFT: 0.8522 (No Estacionaria)
F: 0.8124 (No Estacionaria)

**KPSS Test (p-values):**
AAPL: 0.0593 (Estacionaria)
NVDA: 0.0100 (No Estacionaria)
MSFT: 0.0100 (No Estacionaria)
F: 0.0100 (No Estacionaria)

**Cointegration Tests (p-values):**
AAPL vs NVDA: 0.5942 (No Cointegradas)
AAPL vs MSFT: 0.3644 (No Cointegradas)
AAPL vs F: 0.5458 (No Cointegradas)
NVDA vs MSFT: 0.5100 (No Cointegradas)
NVDA vs F: 0.5173 (No Cointegradas)
MSFT vs F: 0.2781 (No Cointegradas)

**ARIMA Models:**
AAPL: Order=(0, 1, 0), AIC=367.55, MSE=0.20
NVDA: Order=(2, 1, 0), AIC=445.27, MSE=12.09
MSFT: Order=(2, 1, 2), AIC=750.94, MSE=1.40
F: Order=(0, 1, 0), AIC=-1830.40, MSE=0.00

=== Resumen de Resultados para Last 10 Days (Every 30 Minutes) ===

**STL Decomposition (Seasonal std / Series std):**
AAPL: 0.1933
NVDA: 0.1148
MSFT: 0.2096
F: 0.3838

**ADF Test (p-values):**
AAPL: 0.6997 (No Estacionaria)
NVDA: 0.8898 (No Estacionaria)
MSFT: 0.8289 (No Estacionaria)
F: 0.0498 (Estacionaria)

**KPSS Test (p-values):**
AAPL: 0.0100 (No Estacionaria)
NVDA: 0.0100 (No Estacionaria)
MSFT: 0.0100 (No Estacionaria)
F: 0.0836 (Estacionaria)

**Cointegration Tests (p-values):**
AAPL vs NVDA: 0.3201 (No Cointegradas)
AAPL vs MSFT: 0.5892 (No Cointegradas)
AAPL vs F: 0.7830 (No Cointegradas)
NVDA vs MSFT: 0.6511 (No Cointegradas)
NVDA vs F: 0.9000 (No Cointegradas)
MSFT vs F: 0.9290 (No Cointegradas)

**ARIMA Models:**
AAPL: Order=(0, 1, 0), AIC=371.16, MSE=0.97
NVDA: Order=(1, 1, 0), AIC=442.47, MSE=25.88
MSFT: Order=(0, 1, 0), AIC=457.43, MSE=63.01
F: Order=(3, 0, 2), AIC=-306.38, MSE=0.03
In [ ]:
from google.colab import drive
import os

# Montar Google Drive (si ya está montado, no hace nada)
drive.mount('/content/drive')

# Definir la carpeta raíz para buscar
root_folder = '/content/drive/My Drive/6to semestre: feb-jun 2025'

# Verificar que la carpeta raíz existe
if not os.path.exists(root_folder):
    print(f"La carpeta {root_folder} no existe. Verifica el nombre.")
    raise FileNotFoundError("Carpeta raíz no encontrada.")

# Función para buscar el archivo recursivamente
def find_file(start_path, file_name):
    print(f"Buscando '{file_name}' en {start_path}...")
    for root, dirs, files in os.walk(start_path):
        for file in files:
            if file_name.lower() in file.lower():  # Búsqueda insensible a mayúsculas
                print(f"Archivo encontrado: {os.path.join(root, file)}")
        for dir in dirs:
            print(f"Subcarpeta encontrada: {os.path.join(root, dir)}")

# Buscar el archivo
find_file(root_folder, 'act clase bloomberg.ipynb')

# Listar el contenido de la carpeta principal para confirmar
print(f"\nContenido de {root_folder}:")
!ls "{root_folder}"

# Listar el contenido de la subcarpeta Series de tiempo (si existe)
subfolder_path = '/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo'
if os.path.exists(subfolder_path):
    print(f"\nContenido de {subfolder_path}:")
    !ls "{subfolder_path}"
else:
    print(f"La carpeta {subfolder_path} no existe. Verifica el nombre.")
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
Buscando 'act clase bloomberg.ipynb' en /content/drive/My Drive/6to semestre: feb-jun 2025...
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/diagnostico financiero
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/algoritmos y análisis de datos
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/diagnostico financiero/reto
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/diagnostico financiero/m1
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/diagnostico financiero/m2
Subcarpeta encontrada: /content/drive/My Drive/6to semestre: feb-jun 2025/diagnostico financiero/m3

Contenido de /content/drive/My Drive/6to semestre: feb-jun 2025:
'algoritmos y análisis de datos'  'diagnostico financiero'  'series de tiempo'

Contenido de /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo:
'A00838009_of COST WMT XLS ARIMA.html'	  HistoricalPrices-AAPL.xlsx
'A00838009_of COST WMT XLS ARIMA.ipynb'   HistoricalPrices-MSFT.xlsx
 act_2_2.ipynb				 'notas series de tiempo.gdoc'
'ejemplo clase 21 feb 25 intro.ipynb'
In [ ]:
from google.colab import drive
from google.colab import files
import os

# Montar Google Drive
drive.mount('/content/drive')

# Definir la ruta a la carpeta donde está el archivo (ajustada según la salida)
folder_path = '/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo'  # Ajusta según la salida
file_name = 'act clase bloomberg.ipynb'  # Ajusta según la salida
full_path = os.path.join(folder_path, file_name)

# Verificar que el archivo exista
if os.path.exists(full_path):
    print(f"El archivo {full_path} fue encontrado.")
else:
    print(f"Error: El archivo {full_path} no fue encontrado. Verifica la ruta.")
    raise FileNotFoundError("Archivo no encontrado.")

# Cambiar al directorio donde está el archivo
os.chdir(folder_path)
print(f"Directorio actual: {os.getcwd()}")
print("Archivos en el directorio:", os.listdir())

# Instalar nbconvert (por seguridad)
!pip install nbconvert

# Convertir el archivo a HTML (usar el nombre del archivo exacto)
!jupyter nbconvert --to html "{file_name}"

# Descargar el archivo HTML generado
html_file = file_name.replace('.ipynb', '.html')  # Genera el nombre del archivo HTML
if os.path.exists(html_file):
    files.download(html_file)
    print(f"Archivo {html_file} descargado correctamente.")
else:
    print(f"Error: No se generó el archivo {html_file}.")
    raise FileNotFoundError("Archivo HTML no generado.")
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
El archivo /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/act clase bloomberg.ipynb fue encontrado.
Directorio actual: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo
Archivos en el directorio: ['act clase bloomberg.ipynb', 'ejemplo clase 21 feb 25 intro.ipynb', 'act_2_2.ipynb', 'notas series de tiempo.gdoc', 'HistoricalPrices-AAPL.xlsx', 'HistoricalPrices-MSFT.xlsx', 'A00838009_of COST WMT XLS ARIMA.html', 'A00838009_of COST WMT XLS ARIMA.ipynb']
Requirement already satisfied: nbconvert in /usr/local/lib/python3.11/dist-packages (7.16.6)
Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (4.13.3)
Requirement already satisfied: bleach!=5.0.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (6.2.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.7.1)
Requirement already satisfied: jinja2>=3.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.6)
Requirement already satisfied: jupyter-core>=4.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.2)
Requirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.3.0)
Requirement already satisfied: markupsafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.0.2)
Requirement already satisfied: mistune<4,>=2.0.3 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.2)
Requirement already satisfied: nbclient>=0.5.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.10.2)
Requirement already satisfied: nbformat>=5.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.10.4)
Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from nbconvert) (24.2)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (1.5.1)
Requirement already satisfied: pygments>=2.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (2.18.0)
Requirement already satisfied: traitlets>=5.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.1)
Requirement already satisfied: webencodings in /usr/local/lib/python3.11/dist-packages (from bleach!=5.0.0->bleach[css]!=5.0.0->nbconvert) (0.5.1)
Requirement already satisfied: tinycss2<1.5,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (1.4.0)
Requirement already satisfied: platformdirs>=2.5 in /usr/local/lib/python3.11/dist-packages (from jupyter-core>=4.7->nbconvert) (4.3.6)
Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.11/dist-packages (from nbclient>=0.5.0->nbconvert) (6.1.12)
Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (2.21.1)
Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (4.23.0)
Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (2.6)
Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (4.12.2)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (25.1.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (2024.10.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.36.2)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.23.1)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (24.0.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (2.8.2)
Requirement already satisfied: tornado>=4.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (6.4.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.1->jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (1.17.0)
[NbConvertApp] Converting notebook act clase bloomberg.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 49 image(s).
[NbConvertApp] Writing 4437110 bytes to act clase bloomberg.html
Archivo act clase bloomberg.html descargado correctamente.
In [ ]:
# Instalación de librerías necesarias
!pip install arch pandas numpy matplotlib statsmodels pmdarima

# Importación de librerías
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from arch import arch_model
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.arima.model import ARIMA
from pmdarima import auto_arima
from statsmodels.stats.diagnostic import acorr_ljungbox
from google.colab import files
import io

# Subir el archivo
uploaded = files.upload()

# Inspeccionar las claves del diccionario uploaded para confirmar el nombre del archivo
print("Archivos subidos:", uploaded.keys())

# Usar la primera clave disponible (asumiendo que solo subiste un archivo)
file_name = list(uploaded.keys())[0]
print(f"Usando el archivo: {file_name}")

# Cargar el archivo desde el buffer
data = pd.read_excel(io.BytesIO(uploaded[file_name]), sheet_name=None)  # Carga todas las hojas

# Nombres de las hojas
sheet_names = ['BLCKROCK 1', 'BLCKROCK 5', 'HII 1', 'HII 5', 'LMT 1', 'LMT 5']

# Diccionario para almacenar los datos
data_dict = {sheet: data[sheet] for sheet in sheet_names}

# Verificar que las hojas se cargaron correctamente
print("Hojas cargadas:", data_dict.keys())
Requirement already satisfied: arch in /usr/local/lib/python3.11/dist-packages (7.2.0)
Requirement already satisfied: pandas in /usr/local/lib/python3.11/dist-packages (2.2.2)
Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (1.26.4)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.11/dist-packages (3.10.0)
Requirement already satisfied: statsmodels in /usr/local/lib/python3.11/dist-packages (0.14.4)
Requirement already satisfied: pmdarima in /usr/local/lib/python3.11/dist-packages (2.0.4)
Requirement already satisfied: scipy>=1.8 in /usr/local/lib/python3.11/dist-packages (from arch) (1.14.1)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas) (2025.1)
Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.3.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (4.56.0)
Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.4.8)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (24.2)
Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (11.1.0)
Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (3.2.1)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels) (1.0.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.4.2)
Requirement already satisfied: Cython!=0.29.18,!=0.29.31,>=0.29 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (3.0.12)
Requirement already satisfied: scikit-learn>=0.22 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.6.1)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (2.3.0)
Requirement already satisfied: setuptools!=50.0.0,>=38.6.0 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (75.1.0)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)
Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn>=0.22->pmdarima) (3.5.0)
Upload widget is only available when the cell has been executed in the current browser session. Please rerun this cell to enable.
Saving descarga refinitiv.xlsx to descarga refinitiv (4).xlsx
Archivos subidos: dict_keys(['descarga refinitiv (4).xlsx'])
Usando el archivo: descarga refinitiv (4).xlsx
Hojas cargadas: dict_keys(['BLCKROCK 1', 'BLCKROCK 5', 'HII 1', 'HII 5', 'LMT 1', 'LMT 5'])
In [ ]:
# Ver las columnas de cada hoja
for sheet_name, df in data_dict.items():
    print(f"\nHojas: {sheet_name}")
    print("Columnas disponibles:", df.columns.tolist())
    print("Primeras 5 filas:")
    print(df.head())
Hojas: BLCKROCK 1
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time   Close   Net      %Chg   Open  \
0 2025-03-11 22:53:00 2025-03-11 22:53:00     NaN   NaN       NaN    NaN   
1 2025-03-11 17:57:00 2025-03-11 17:57:00  917.00  0.01  0.000011  917.0   
2 2025-03-11 17:56:00 2025-03-11 17:56:00  916.99 -0.01 -0.000011  928.0   
3 2025-03-11 17:55:00 2025-03-11 17:55:00  917.00  2.00  0.002186  917.0   
4 2025-03-11 17:53:00 2025-03-11 17:53:00  915.00  0.00  0.000000  915.0   

      Low   High  Volume  Trade Price  
0     NaN    NaN     NaN          NaN  
1  917.00  917.0     9.0       917.00  
2  916.99  928.0     4.0       916.99  
3  917.00  917.0     1.0       917.00  
4  915.00  915.0     2.0       915.00  

Hojas: BLCKROCK 5
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time  Close  Net      %Chg   Open  \
0 2025-03-11 22:55:00 2025-03-11 22:55:00    NaN  NaN       NaN    NaN   
1 2025-03-11 18:00:00 2025-03-11 18:00:00  917.0  0.0  0.000000  928.0   
2 2025-03-11 17:55:00 2025-03-11 17:55:00  917.0  6.0  0.006586  915.0   
3 2025-03-11 17:45:00 2025-03-11 17:45:00  911.0 -4.0 -0.004372  911.0   
4 2025-03-11 17:40:00 2025-03-11 17:40:00  915.0  4.0  0.004391  915.0   

      Low   High  Volume  Trade Price  
0     NaN    NaN     NaN          NaN  
1  916.99  928.0    13.0        917.0  
2  915.00  917.0     6.0        917.0  
3  911.00  911.0     1.0        911.0  
4  915.00  915.0     2.0        915.0  

Hojas: HII 1
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time   Close   Net      %Chg    Open  \
0 2025-03-11 22:54:00 2025-03-11 22:54:00     NaN   NaN       NaN     NaN   
1 2025-03-11 17:56:00 2025-03-11 17:56:00     NaN   NaN       NaN     NaN   
2 2025-03-11 17:53:00 2025-03-11 17:53:00  195.00 -0.02 -0.000103  195.00   
3 2025-03-11 17:43:00 2025-03-11 17:43:00  195.02 -1.08 -0.005507  195.02   
4 2025-03-11 17:37:00 2025-03-11 17:37:00     NaN   NaN       NaN     NaN   

      Low    High  Volume  Trade Price  
0     NaN     NaN     NaN          NaN  
1     NaN     NaN     NaN          NaN  
2  195.00  195.00     4.0       195.00  
3  195.02  195.02    13.0       195.02  
4     NaN     NaN     NaN          NaN  

Hojas: HII 5
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time   Close   Net      %Chg    Open  \
0 2025-03-11 22:55:00 2025-03-11 22:55:00     NaN   NaN       NaN     NaN   
1 2025-03-11 18:00:00 2025-03-11 18:00:00     NaN   NaN       NaN     NaN   
2 2025-03-11 17:55:00 2025-03-11 17:55:00  195.00 -0.02 -0.000103  195.00   
3 2025-03-11 17:45:00 2025-03-11 17:45:00  195.02 -1.08 -0.005507  195.02   
4 2025-03-11 17:40:00 2025-03-11 17:40:00     NaN   NaN       NaN     NaN   

      Low    High  Volume  Trade Price  
0     NaN     NaN     NaN          NaN  
1     NaN     NaN     NaN          NaN  
2  195.00  195.00     4.0       195.00  
3  195.02  195.02    13.0       195.02  
4     NaN     NaN     NaN          NaN  

Hojas: LMT 1
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time  Close   Net      %Chg   Open  \
0 2025-03-11 22:54:00 2025-03-11 22:54:00    NaN   NaN       NaN    NaN   
1 2025-03-11 18:01:00 2025-03-11 18:01:00    NaN   NaN       NaN    NaN   
2 2025-03-11 17:59:00 2025-03-11 17:59:00  469.0  0.00  0.000000  469.0   
3 2025-03-11 17:56:00 2025-03-11 17:56:00  469.0  0.00  0.000000  469.0   
4 2025-03-11 17:54:00 2025-03-11 17:54:00  469.0 -2.49 -0.005281  469.0   

     Low   High  Volume  Trade Price  
0    NaN    NaN     NaN          NaN  
1    NaN    NaN     NaN          NaN  
2  469.0  469.0    25.0        469.0  
3  469.0  469.0    10.0        469.0  
4  469.0  469.0    10.0        469.0  

Hojas: LMT 5
Columnas disponibles: ['Local Date', 'Local Time', 'Close', 'Net', '%Chg', 'Open', 'Low', 'High', 'Volume', 'Trade Price']
Primeras 5 filas:
           Local Date          Local Time  Close   Net      %Chg    Open  \
0 2025-03-11 22:55:00 2025-03-11 22:55:00    NaN   NaN       NaN     NaN   
1 2025-03-11 18:05:00 2025-03-11 18:05:00    NaN   NaN       NaN     NaN   
2 2025-03-11 18:00:00 2025-03-11 18:00:00  469.0  0.00  0.000000  469.00   
3 2025-03-11 17:55:00 2025-03-11 17:55:00  469.0 -2.00 -0.004246  471.49   
4 2025-03-11 17:35:00 2025-03-11 17:35:00  471.0  1.87  0.003986  471.00   

     Low    High  Volume  Trade Price  
0    NaN     NaN     NaN          NaN  
1    NaN     NaN     NaN          NaN  
2  469.0  469.00    35.0        469.0  
3  469.0  471.49    11.0        469.0  
4  471.0  471.00    12.0        471.0  
In [ ]:
import pandas as pd
import matplotlib.pyplot as plt

# Función para preprocesar los datos
def preprocess_data(df):
    # Verificar que las columnas necesarias existen
    required_columns = ['Local Date', 'Close']
    missing_columns = [col for col in required_columns if col not in df.columns]
    if missing_columns:
        raise ValueError(f"Faltan las columnas: {missing_columns}")

    # Usar solo 'Local Date' como índice de fecha/hora (ya tiene el formato completo)
    df['Datetime'] = pd.to_datetime(df['Local Date'])
    df.set_index('Datetime', inplace=True)
    # Usar precios de cierre y eliminar NaN
    df = df['Close'].dropna()
    return df

# Preprocesar todas las hojas
processed_data = {}
for key in data_dict.keys():
    try:
        processed_data[key] = preprocess_data(data_dict[key])
    except Exception as e:
        print(f"Error al procesar {key}: {e}")

# Visualizar datos de BlackRock 1min
plt.figure(figsize=(10, 6))
plt.plot(processed_data['BLCKROCK 1'], label='BlackRock 1min Close Price')
plt.title('BlackRock 1min Price Series')
plt.xlabel('DateTime')
plt.ylabel('Close Price')
plt.legend()
plt.show()

# Verificar las primeras filas de BLCKROCK 1
print("Primeras filas de BLCKROCK 1:")
print(processed_data['BLCKROCK 1'].head())
No description has been provided for this image
Primeras filas de BLCKROCK 1:
Datetime
2025-03-11 17:57:00    917.00
2025-03-11 17:56:00    916.99
2025-03-11 17:55:00    917.00
2025-03-11 17:53:00    915.00
2025-03-11 17:52:00    915.00
Name: Close, dtype: float64
In [ ]:
import numpy as np
from statsmodels.tsa.stattools import adfuller, kpss

# Función para pruebas de estacionariedad
def stationarity_tests(series, name):
    # ADF Test (H0: Hay raíz unitaria, no estacionaria)
    adf_result = adfuller(series)
    print(f'{name} - ADF Test: p-value = {adf_result[1]:.5f}')

    # KPSS Test (H0: Es estacionaria)
    kpss_result = kpss(series, regression='c')  # 'c' para constante
    print(f'{name} - KPSS Test: p-value = {kpss_result[1]:.5f}\n')

# Ejecutar pruebas para cada serie de precios
for key, series in processed_data.items():
    print(f"--- {key} (Precios) ---")
    stationarity_tests(series, key)

# Calcular retornos logarítmicos (diferencias logarítmicas)
returns_dict = {key: np.log(series).diff().dropna() for key, series in processed_data.items()}

# Volver a probar estacionariedad en retornos
for key, series in returns_dict.items():
    print(f"--- {key} (Retornos) ---")
    stationarity_tests(series, key)
--- BLCKROCK 1 (Precios) ---
BLCKROCK 1 - ADF Test: p-value = 0.87575
BLCKROCK 1 - KPSS Test: p-value = 0.01000

--- BLCKROCK 5 (Precios) ---
BLCKROCK 5 - ADF Test: p-value = 0.76842
BLCKROCK 5 - KPSS Test: p-value = 0.01000

--- HII 1 (Precios) ---
HII 1 - ADF Test: p-value = 0.45563
HII 1 - KPSS Test: p-value = 0.01000

--- HII 5 (Precios) ---
HII 5 - ADF Test: p-value = 0.02750
HII 5 - KPSS Test: p-value = 0.02340

--- LMT 1 (Precios) ---
LMT 1 - ADF Test: p-value = 0.51288
LMT 1 - KPSS Test: p-value = 0.01000

--- LMT 5 (Precios) ---
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is smaller than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
LMT 5 - ADF Test: p-value = 0.50001
LMT 5 - KPSS Test: p-value = 0.01000

--- BLCKROCK 1 (Retornos) ---
BLCKROCK 1 - ADF Test: p-value = 0.00000
BLCKROCK 1 - KPSS Test: p-value = 0.10000

--- BLCKROCK 5 (Retornos) ---
BLCKROCK 5 - ADF Test: p-value = 0.00001
BLCKROCK 5 - KPSS Test: p-value = 0.10000

--- HII 1 (Retornos) ---
HII 1 - ADF Test: p-value = 0.00000
HII 1 - KPSS Test: p-value = 0.10000

--- HII 5 (Retornos) ---
HII 5 - ADF Test: p-value = 0.00000
HII 5 - KPSS Test: p-value = 0.10000

--- LMT 1 (Retornos) ---
LMT 1 - ADF Test: p-value = 0.00000
LMT 1 - KPSS Test: p-value = 0.10000

--- LMT 5 (Retornos) ---
LMT 5 - ADF Test: p-value = 0.00001
LMT 5 - KPSS Test: p-value = 0.10000

<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
<ipython-input-12-8c5ef1adef2e>:11: InterpolationWarning: The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is greater than the p-value returned.

  kpss_result = kpss(series, regression='c')  # 'c' para constante
In [ ]:
from statsmodels.tsa.arima.model import ARIMA
from pmdarima import auto_arima
from arch import arch_model
from statsmodels.stats.diagnostic import acorr_ljungbox
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Asegurarnos de que las series tengan un índice monótono y con frecuencia
for key in returns_dict:
    # Ordenar el índice para hacerlo monótono
    returns_dict[key] = returns_dict[key].sort_index()
    # Asignar una frecuencia (1 minuto)
    returns_dict[key].index = pd.date_range(start=returns_dict[key].index[0],
                                            periods=len(returns_dict[key]),
                                            freq='1min')

# Definir forecast_steps globalmente
forecast_steps = 10

# Función para ajustar ARIMA y hacer forecast
def fit_arima(series, name, order=(1, 0, 1), forecast_steps=forecast_steps):
    model = ARIMA(series, order=order)
    result = model.fit()
    print(f'{name} - ARIMA{order} Summary:')
    print(result.summary())

    # Diagnóstico de residuos
    residuals = result.resid
    print(f'{name} - Ljung-Box Test (normalidad residuos):', acorr_ljungbox(residuals, lags=[10], return_df=True))

    # Forecast
    forecast = result.forecast(steps=forecast_steps)
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series[-50:], label='Observed Returns')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.title(f'{name} - ARIMA Forecast')
    plt.legend()
    plt.show()
    return result

# Función para ajustar GARCH
def fit_garch(series, name, p=1, q=1, forecast_steps=forecast_steps):
    model = arch_model(series, vol='Garch', p=p, q=q, dist='normal')
    result = model.fit(disp='off')
    print(f'{name} - GARCH({p},{q}) Summary:')
    print(result.summary())

    # Diagnóstico de residuos
    residuals = result.resid
    print(f'{name} - Ljung-Box Test (normalidad residuos):', acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))

    # Forecast de volatilidad
    forecast = result.forecast(horizon=forecast_steps)
    volatility = np.sqrt(forecast.variance.values[-1, :])
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility', color='red')
    plt.title(f'{name} - GARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return result

# Ajustar ARIMA manual para cada serie de retornos
arima_results = {}
for key, series in returns_dict.items():
    arima_results[key] = fit_arima(series, key, order=(1, 0, 1))

# AutoARIMA para comparación
auto_arima_results = {}
for key, series in returns_dict.items():
    auto_model = auto_arima(series, seasonal=False, trace=True, suppress_warnings=True)
    auto_arima_results[key] = auto_model
    print(f'{key} - Best AutoARIMA: {auto_model.order}')
    forecast = auto_model.predict(n_periods=forecast_steps)
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series[-50:], label='Observed Returns')
    plt.plot(forecast_index, forecast, label='AutoARIMA Forecast', color='green')
    plt.title(f'{key} - AutoARIMA Forecast')
    plt.legend()
    plt.show()

# Ajustar GARCH para cada serie de retornos
garch_results = {}
for key, series in returns_dict.items():
    garch_results[key] = fit_garch(series, key, p=1, q=1)
BLCKROCK 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                 1189
Model:                 ARIMA(1, 0, 1)   Log Likelihood                5024.252
Date:                Thu, 13 Mar 2025   AIC                         -10040.504
Time:                        21:41:33   BIC                         -10020.180
Sample:                    03-10-2025   HQIC                        -10032.845
                         - 03-10-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       1.716e-05   3.92e-05      0.437      0.662   -5.97e-05    9.41e-05
ar.L1         -0.2332      0.018    -12.782      0.000      -0.269      -0.197
ma.L1         -0.5445      0.018    -30.093      0.000      -0.580      -0.509
sigma2      1.247e-05   1.82e-07     68.521      0.000    1.21e-05    1.28e-05
===================================================================================
Ljung-Box (L1) (Q):                   1.08   Jarque-Bera (JB):             13643.25
Prob(Q):                              0.30   Prob(JB):                         0.00
Heteroskedasticity (H):               4.09   Skew:                            -1.04
Prob(H) (two-sided):                  0.00   Kurtosis:                        19.46
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 1 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  64.091689  6.045072e-10
No description has been provided for this image
BLCKROCK 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  477
Model:                 ARIMA(1, 0, 1)   Log Likelihood                1853.082
Date:                Thu, 13 Mar 2025   AIC                          -3698.163
Time:                        21:41:35   BIC                          -3681.493
Sample:                    03-07-2025   HQIC                         -3691.609
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       9.469e-05   7.66e-05      1.237      0.216   -5.54e-05       0.000
ar.L1          0.0228      0.045      0.504      0.614      -0.066       0.112
ma.L1         -0.6779      0.039    -17.388      0.000      -0.754      -0.602
sigma2      2.467e-05   7.16e-07     34.429      0.000    2.33e-05    2.61e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.08   Jarque-Bera (JB):              1473.17
Prob(Q):                              0.77   Prob(JB):                         0.00
Heteroskedasticity (H):               0.82   Skew:                             0.19
Prob(H) (two-sided):                  0.21   Kurtosis:                        11.60
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  28.700137   0.001393
No description has been provided for this image
HII 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  977
Model:                 ARIMA(1, 0, 1)   Log Likelihood                4236.019
Date:                Thu, 13 Mar 2025   AIC                          -8464.038
Time:                        21:41:38   BIC                          -8444.500
Sample:                    03-10-2025   HQIC                         -8456.604
                         - 03-10-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       5.286e-06   7.41e-05      0.071      0.943      -0.000       0.000
ar.L1         -0.5031      0.015    -33.629      0.000      -0.532      -0.474
ma.L1         -0.0296      0.023     -1.279      0.201      -0.075       0.016
sigma2      1.001e-05   9.08e-08    110.211      0.000    9.83e-06    1.02e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.00   Jarque-Bera (JB):            138931.28
Prob(Q):                              1.00   Prob(JB):                         0.00
Heteroskedasticity (H):               1.32   Skew:                             3.06
Prob(H) (two-sided):                  0.01   Kurtosis:                        61.10
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 1 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  67.346543  1.438466e-10
No description has been provided for this image
HII 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  412
Model:                 ARIMA(1, 0, 1)   Log Likelihood                1685.738
Date:                Thu, 13 Mar 2025   AIC                          -3363.476
Time:                        21:41:39   BIC                          -3347.392
Sample:                    03-07-2025   HQIC                         -3357.114
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       1.203e-05      0.000      0.089      0.929      -0.000       0.000
ar.L1          0.1325      0.119      1.112      0.266      -0.101       0.366
ma.L1         -0.4171      0.110     -3.798      0.000      -0.632      -0.202
sigma2      1.634e-05   6.62e-07     24.685      0.000     1.5e-05    1.76e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.09   Jarque-Bera (JB):               275.29
Prob(Q):                              0.76   Prob(JB):                         0.00
Heteroskedasticity (H):               1.45   Skew:                             0.13
Prob(H) (two-sided):                  0.03   Kurtosis:                         7.00
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  10.999124   0.357586
No description has been provided for this image
LMT 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                 1367
Model:                 ARIMA(1, 0, 1)   Log Likelihood                6346.673
Date:                Thu, 13 Mar 2025   AIC                         -12685.347
Time:                        21:41:39   BIC                         -12664.465
Sample:                    03-10-2025   HQIC                        -12677.532
                         - 03-11-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       4.222e-06   2.99e-05      0.141      0.888   -5.44e-05    6.28e-05
ar.L1         -0.1103      0.019     -5.772      0.000      -0.148      -0.073
ma.L1         -0.5364      0.019    -28.269      0.000      -0.574      -0.499
sigma2       5.44e-06   7.21e-08     75.470      0.000     5.3e-06    5.58e-06
===================================================================================
Ljung-Box (L1) (Q):                   0.13   Jarque-Bera (JB):             21896.83
Prob(Q):                              0.72   Prob(JB):                         0.00
Heteroskedasticity (H):               3.83   Skew:                            -1.84
Prob(H) (two-sided):                  0.00   Kurtosis:                        22.26
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
LMT 1 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  29.024052   0.001235
No description has been provided for this image
LMT 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  543
Model:                 ARIMA(1, 0, 1)   Log Likelihood                2367.405
Date:                Thu, 13 Mar 2025   AIC                          -4726.810
Time:                        21:41:40   BIC                          -4709.621
Sample:                    03-07-2025   HQIC                         -4720.089
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const      -3.264e-05   7.82e-05     -0.417      0.677      -0.000       0.000
ar.L1          0.0620      0.077      0.807      0.420      -0.089       0.213
ma.L1         -0.4911      0.069     -7.098      0.000      -0.627      -0.356
sigma2      9.566e-06    2.9e-07     32.963      0.000       9e-06    1.01e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.01   Jarque-Bera (JB):              1457.55
Prob(Q):                              0.92   Prob(JB):                         0.00
Heteroskedasticity (H):               1.56   Skew:                            -0.98
Prob(H) (two-sided):                  0.00   Kurtosis:                        10.78
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
LMT 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  20.761104   0.022822
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-10056.579, Time=1.02 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-9422.857, Time=0.11 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-9896.676, Time=0.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-10035.734, Time=0.19 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-10053.942, Time=0.55 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-10039.293, Time=1.03 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=inf, Time=2.76 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-10046.696, Time=0.47 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-10042.147, Time=0.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-10056.084, Time=0.38 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-10043.850, Time=0.62 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0]             : AIC=-10078.957, Time=1.85 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,3)(0,0,0)[0]             : AIC=-10078.238, Time=0.51 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,4)(0,0,0)[0]             : AIC=-10074.604, Time=1.36 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,4)(0,0,0)[0]             : AIC=-10064.937, Time=0.66 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,2)(0,0,0)[0]             : AIC=-10045.506, Time=1.35 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,4)(0,0,0)[0]             : AIC=-10077.976, Time=1.78 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0] intercept   : AIC=-10065.474, Time=3.20 sec

Best model:  ARIMA(3,0,3)(0,0,0)[0]          
Total fit time: 18.228 seconds
BLCKROCK 1 - Best AutoARIMA: (3, 0, 3)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3696.926, Time=0.41 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3528.864, Time=0.06 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3647.385, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3701.922, Time=0.04 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3698.741, Time=0.14 sec
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3701.112, Time=0.10 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3698.948, Time=0.28 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3701.318, Time=0.14 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.260 seconds
BLCKROCK 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-8476.865, Time=1.04 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-8155.722, Time=0.09 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-8467.776, Time=0.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-8429.996, Time=0.12 sec
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-8463.153, Time=0.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-8463.938, Time=0.41 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=-8470.197, Time=0.85 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-8470.847, Time=0.34 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-8466.018, Time=0.19 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-8479.730, Time=0.67 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-8493.616, Time=0.34 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-8423.238, Time=0.42 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,4)(0,0,0)[0]             : AIC=-8491.869, Time=0.43 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,4)(0,0,0)[0]             : AIC=-8491.131, Time=0.68 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0] intercept   : AIC=-8492.653, Time=1.89 sec

Best model:  ARIMA(0,0,3)(0,0,0)[0]          
Total fit time: 7.725 seconds
HII 1 - Best AutoARIMA: (0, 0, 3)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3363.364, Time=0.33 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3334.149, Time=0.07 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3363.561, Time=0.07 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3368.173, Time=0.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3365.340, Time=1.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3366.079, Time=0.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3364.057, Time=0.30 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3366.184, Time=0.82 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 3.407 seconds
HII 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-12669.908, Time=0.45 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-12196.515, Time=0.12 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-12573.272, Time=0.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-12675.947, Time=0.20 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-12687.277, Time=0.27 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-12685.327, Time=0.35 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-12685.636, Time=0.40 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-12688.010, Time=0.32 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-12686.369, Time=0.61 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-12684.450, Time=1.19 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0] intercept   : AIC=-12686.083, Time=1.54 sec

Best model:  ARIMA(0,0,2)(0,0,0)[0]          
Total fit time: 5.521 seconds
LMT 1 - Best AutoARIMA: (0, 0, 2)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-4729.896, Time=0.16 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-4642.682, Time=0.09 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-4711.591, Time=0.07 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-4731.860, Time=0.20 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-4728.657, Time=0.14 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-4729.923, Time=0.21 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-4728.342, Time=0.31 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-4730.007, Time=0.19 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.374 seconds
LMT 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 2.113e-05. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 100 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
BLCKROCK 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                5400.41
Distribution:                  Normal   AIC:                          -10792.8
Method:            Maximum Likelihood   BIC:                          -10772.5
                                        No. Observations:                 1189
Date:                Thu, Mar 13 2025   Df Residuals:                     1188
Time:                        21:42:19   Df Model:                            1
                                 Mean Model                                 
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
mu         2.3711e-05  4.098e-07     57.865      0.000 [2.291e-05,2.451e-05]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      4.2269e-07  3.929e-11  1.076e+04      0.000 [4.226e-07,4.228e-07]
alpha[1]       0.2000      0.195      1.027      0.305     [ -0.182,  0.582]
beta[1]        0.7800      0.132      5.921  3.190e-09     [  0.522,  1.038]
============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Inequality constraints incompatible.
See convergence_flag.

BLCKROCK 1 - Ljung-Box Test (normalidad residuos):        lb_stat      lb_pvalue
10  545.788606  7.139468e-111
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 3.57e-05. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 100 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
BLCKROCK 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                1915.74
Distribution:                  Normal   AIC:                          -3823.47
Method:            Maximum Likelihood   BIC:                          -3806.80
                                        No. Observations:                  477
Date:                Thu, Mar 13 2025   Df Residuals:                      476
Time:                        21:42:20   Df Model:                            1
                                 Mean Model                                 
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
mu         1.5293e-04  4.755e-07    321.590      0.000 [1.520e-04,1.539e-04]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      3.5700e-06  5.740e-11  6.220e+04      0.000 [3.570e-06,3.570e-06]
alpha[1]       0.2000  6.121e-02      3.268  1.084e-03   [8.004e-02,  0.320]
beta[1]        0.7000  6.383e-02     10.967  5.532e-28     [  0.575,  0.825]
============================================================================

Covariance estimator: robust
BLCKROCK 5 - Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  140.711101  3.012962e-25
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 1.384e-05. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 100 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
HII 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -3039.94
Distribution:                  Normal   AIC:                           6087.87
Method:            Maximum Likelihood   BIC:                           6107.41
                                        No. Observations:                  977
Date:                Thu, Mar 13 2025   Df Residuals:                      976
Time:                        21:42:20   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu            -0.2677  1.945e-07 -1.376e+06      0.000 [ -0.268, -0.268]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      2.7688e-07  1.552e-11  1.783e+04      0.000 [2.769e-07,2.769e-07]
alpha[1]       0.2000      1.319      0.152      0.879     [ -2.385,  2.785]
beta[1]        0.7800      1.682      0.464      0.643     [ -2.517,  4.077]
============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Inequality constraints incompatible.
See convergence_flag.

HII 1 - Ljung-Box Test (normalidad residuos):      lb_stat     lb_pvalue
10  456.7053  7.754572e-92
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 1.782e-05. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 100 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
HII 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                1704.39
Distribution:                  Normal   AIC:                          -3400.77
Method:            Maximum Likelihood   BIC:                          -3384.69
                                        No. Observations:                  412
Date:                Thu, Mar 13 2025   Df Residuals:                      411
Time:                        21:42:20   Df Model:                            1
                                  Mean Model                                  
==============================================================================
                  coef    std err          t      P>|t|       95.0% Conf. Int.
------------------------------------------------------------------------------
mu         -4.7746e-05  1.015e-04     -0.471      0.638 [-2.466e-04,1.511e-04]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      1.7817e-06  1.636e-09   1089.098      0.000 [1.779e-06,1.785e-06]
alpha[1]       0.2000  4.375e-02      4.572  4.837e-06     [  0.114,  0.286]
beta[1]        0.7000  4.258e-02     16.438  1.019e-60     [  0.617,  0.783]
============================================================================

Covariance estimator: robust
HII 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  38.718779   0.000028
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 7.8e-06. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 1000 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
LMT 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                6899.57
Distribution:                  Normal   AIC:                          -13791.1
Method:            Maximum Likelihood   BIC:                          -13770.3
                                        No. Observations:                 1367
Date:                Thu, Mar 13 2025   Df Residuals:                     1366
Time:                        21:42:21   Df Model:                            1
                                 Mean Model                                 
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
mu         6.9082e-06  3.204e-07     21.559 4.389e-103 [6.280e-06,7.536e-06]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      1.5599e-07  5.708e-11   2733.029      0.000 [1.559e-07,1.561e-07]
alpha[1]       0.2000  4.938e-02      4.050  5.124e-05     [  0.103,  0.297]
beta[1]        0.7800  3.751e-02     20.794  4.889e-96     [  0.706,  0.854]
============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Inequality constraints incompatible.
See convergence_flag.

LMT 1 - Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  387.364788  4.591660e-77
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:309: DataScaleWarning: y is poorly scaled, which may affect convergence of the optimizer when
estimating the model parameters. The scale of y is 1.129e-05. Parameter
estimation work better when this value is between 1 and 1000. The recommended
rescaling is 100 * y.

This warning can be disabled by either rescaling y before initializing the
model or by setting rescale=False.

  warnings.warn(
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
LMT 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -424.595
Distribution:                  Normal   AIC:                           857.189
Method:            Maximum Likelihood   BIC:                           874.378
                                        No. Observations:                  543
Date:                Thu, Mar 13 2025   Df Residuals:                      542
Time:                        21:42:21   Df Model:                            1
                                 Mean Model                                 
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
mu             0.0476  4.865e-07  9.782e+04      0.000 [4.759e-02,4.759e-02]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      2.2578e-07  1.650e-11  1.369e+04      0.000 [2.257e-07,2.258e-07]
alpha[1]       0.1000      0.221      0.453      0.651     [ -0.333,  0.533]
beta[1]        0.8800      0.363      2.424  1.536e-02     [  0.168,  1.592]
============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Inequality constraints incompatible.
See convergence_flag.

LMT 5 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  94.806292  5.936091e-16
No description has been provided for this image
In [ ]:
from statsmodels.tsa.arima.model import ARIMA
from pmdarima import auto_arima
from arch import arch_model
from statsmodels.stats.diagnostic import acorr_ljungbox
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Asegurarnos de que las series tengan un índice monótono y con frecuencia
for key in returns_dict:
    # Ordenar el índice para hacerlo monótono
    returns_dict[key] = returns_dict[key].sort_index()
    # Asignar una frecuencia (1 minuto)
    returns_dict[key].index = pd.date_range(start=returns_dict[key].index[0],
                                            periods=len(returns_dict[key]),
                                            freq='1min')

# Definir forecast_steps globalmente
forecast_steps = 10

# Función para ajustar ARIMA y hacer forecast
def fit_arima(series, name, order=(1, 0, 1), forecast_steps=forecast_steps):
    model = ARIMA(series, order=order)
    result = model.fit()
    print(f'{name} - ARIMA{order} Summary:')
    print(result.summary())

    # Diagnóstico de residuos
    residuals = result.resid
    print(f'{name} - Ljung-Box Test (normalidad residuos):', acorr_ljungbox(residuals, lags=[10], return_df=True))

    # Forecast
    forecast = result.forecast(steps=forecast_steps)
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series[-50:], label='Observed Returns')
    plt.plot(forecast_index, forecast, label='Forecast', color='red')
    plt.title(f'{name} - ARIMA Forecast')
    plt.legend()
    plt.show()
    return result

# Función para ajustar GARCH con reescalado
def fit_garch(series, name, p=1, q=1, forecast_steps=forecast_steps):
    # Reescalar los datos para mejorar la convergencia
    scale_factor = 100  # Multiplicamos por 100 según recomendación
    scaled_series = series * scale_factor

    model = arch_model(scaled_series, vol='Garch', p=p, q=q, dist='normal', rescale=False)
    result = model.fit(disp='off')
    print(f'{name} - GARCH({p},{q}) Summary:')
    print(result.summary())

    # Diagnóstico de residuos
    residuals = result.resid / scale_factor  # Desescalamos los residuos
    print(f'{name} - Ljung-Box Test (normalidad residuos):', acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))

    # Forecast de volatilidad
    forecast = result.forecast(horizon=forecast_steps)
    volatility = np.sqrt(forecast.variance.values[-1, :]) / scale_factor  # Desescalamos la volatilidad
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility', color='red')
    plt.title(f'{name} - GARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return result

# Ajustar ARIMA manual para cada serie de retornos
arima_results = {}
for key, series in returns_dict.items():
    arima_results[key] = fit_arima(series, key, order=(1, 0, 1))

# AutoARIMA para comparación
auto_arima_results = {}
for key, series in returns_dict.items():
    auto_model = auto_arima(series, seasonal=False, trace=True, suppress_warnings=True)
    auto_arima_results[key] = auto_model
    print(f'{key} - Best AutoARIMA: {auto_model.order}')
    forecast = auto_model.predict(n_periods=forecast_steps)
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series[-50:], label='Observed Returns')
    plt.plot(forecast_index, forecast, label='AutoARIMA Forecast', color='green')
    plt.title(f'{key} - AutoARIMA Forecast')  # Corregido: usar 'key' en lugar de 'name'
    plt.legend()
    plt.show()

# Ajustar GARCH para cada serie de retornos
garch_results = {}
for key, series in returns_dict.items():
    garch_results[key] = fit_garch(series, key, p=1, q=1)
BLCKROCK 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                 1189
Model:                 ARIMA(1, 0, 1)   Log Likelihood                5024.252
Date:                Thu, 13 Mar 2025   AIC                         -10040.504
Time:                        21:45:39   BIC                         -10020.180
Sample:                    03-10-2025   HQIC                        -10032.845
                         - 03-10-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       1.716e-05   3.92e-05      0.437      0.662   -5.97e-05    9.41e-05
ar.L1         -0.2332      0.018    -12.782      0.000      -0.269      -0.197
ma.L1         -0.5445      0.018    -30.093      0.000      -0.580      -0.509
sigma2      1.247e-05   1.82e-07     68.521      0.000    1.21e-05    1.28e-05
===================================================================================
Ljung-Box (L1) (Q):                   1.08   Jarque-Bera (JB):             13643.25
Prob(Q):                              0.30   Prob(JB):                         0.00
Heteroskedasticity (H):               4.09   Skew:                            -1.04
Prob(H) (two-sided):                  0.00   Kurtosis:                        19.46
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 1 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  64.091689  6.045072e-10
No description has been provided for this image
BLCKROCK 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  477
Model:                 ARIMA(1, 0, 1)   Log Likelihood                1853.082
Date:                Thu, 13 Mar 2025   AIC                          -3698.163
Time:                        21:45:40   BIC                          -3681.493
Sample:                    03-07-2025   HQIC                         -3691.609
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       9.469e-05   7.66e-05      1.237      0.216   -5.54e-05       0.000
ar.L1          0.0228      0.045      0.504      0.614      -0.066       0.112
ma.L1         -0.6779      0.039    -17.388      0.000      -0.754      -0.602
sigma2      2.467e-05   7.16e-07     34.429      0.000    2.33e-05    2.61e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.08   Jarque-Bera (JB):              1473.17
Prob(Q):                              0.77   Prob(JB):                         0.00
Heteroskedasticity (H):               0.82   Skew:                             0.19
Prob(H) (two-sided):                  0.21   Kurtosis:                        11.60
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  28.700137   0.001393
No description has been provided for this image
HII 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  977
Model:                 ARIMA(1, 0, 1)   Log Likelihood                4236.019
Date:                Thu, 13 Mar 2025   AIC                          -8464.038
Time:                        21:45:40   BIC                          -8444.500
Sample:                    03-10-2025   HQIC                         -8456.604
                         - 03-10-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       5.286e-06   7.41e-05      0.071      0.943      -0.000       0.000
ar.L1         -0.5031      0.015    -33.629      0.000      -0.532      -0.474
ma.L1         -0.0296      0.023     -1.279      0.201      -0.075       0.016
sigma2      1.001e-05   9.08e-08    110.211      0.000    9.83e-06    1.02e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.00   Jarque-Bera (JB):            138931.28
Prob(Q):                              1.00   Prob(JB):                         0.00
Heteroskedasticity (H):               1.32   Skew:                             3.06
Prob(H) (two-sided):                  0.01   Kurtosis:                        61.10
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 1 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  67.346543  1.438466e-10
No description has been provided for this image
HII 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  412
Model:                 ARIMA(1, 0, 1)   Log Likelihood                1685.738
Date:                Thu, 13 Mar 2025   AIC                          -3363.476
Time:                        21:45:41   BIC                          -3347.392
Sample:                    03-07-2025   HQIC                         -3357.114
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       1.203e-05      0.000      0.089      0.929      -0.000       0.000
ar.L1          0.1325      0.119      1.112      0.266      -0.101       0.366
ma.L1         -0.4171      0.110     -3.798      0.000      -0.632      -0.202
sigma2      1.634e-05   6.62e-07     24.685      0.000     1.5e-05    1.76e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.09   Jarque-Bera (JB):               275.29
Prob(Q):                              0.76   Prob(JB):                         0.00
Heteroskedasticity (H):               1.45   Skew:                             0.13
Prob(H) (two-sided):                  0.03   Kurtosis:                         7.00
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  10.999124   0.357586
No description has been provided for this image
LMT 1 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                 1367
Model:                 ARIMA(1, 0, 1)   Log Likelihood                6346.673
Date:                Thu, 13 Mar 2025   AIC                         -12685.347
Time:                        21:45:42   BIC                         -12664.465
Sample:                    03-10-2025   HQIC                        -12677.532
                         - 03-11-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const       4.222e-06   2.99e-05      0.141      0.888   -5.44e-05    6.28e-05
ar.L1         -0.1103      0.019     -5.772      0.000      -0.148      -0.073
ma.L1         -0.5364      0.019    -28.269      0.000      -0.574      -0.499
sigma2       5.44e-06   7.21e-08     75.470      0.000     5.3e-06    5.58e-06
===================================================================================
Ljung-Box (L1) (Q):                   0.13   Jarque-Bera (JB):             21896.83
Prob(Q):                              0.72   Prob(JB):                         0.00
Heteroskedasticity (H):               3.83   Skew:                            -1.84
Prob(H) (two-sided):                  0.00   Kurtosis:                        22.26
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
LMT 1 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  29.024052   0.001235
No description has been provided for this image
LMT 5 - ARIMA(1, 0, 1) Summary:
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  Close   No. Observations:                  543
Model:                 ARIMA(1, 0, 1)   Log Likelihood                2367.405
Date:                Thu, 13 Mar 2025   AIC                          -4726.810
Time:                        21:45:44   BIC                          -4709.621
Sample:                    03-07-2025   HQIC                         -4720.089
                         - 03-07-2025                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const      -3.264e-05   7.82e-05     -0.417      0.677      -0.000       0.000
ar.L1          0.0620      0.077      0.807      0.420      -0.089       0.213
ma.L1         -0.4911      0.069     -7.098      0.000      -0.627      -0.356
sigma2      9.566e-06    2.9e-07     32.963      0.000       9e-06    1.01e-05
===================================================================================
Ljung-Box (L1) (Q):                   0.01   Jarque-Bera (JB):              1457.55
Prob(Q):                              0.92   Prob(JB):                         0.00
Heteroskedasticity (H):               1.56   Skew:                            -0.98
Prob(H) (two-sided):                  0.00   Kurtosis:                        10.78
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
LMT 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  20.761104   0.022822
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-10056.579, Time=1.04 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-9422.857, Time=0.12 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-9896.676, Time=0.08 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-10035.734, Time=0.22 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-10053.942, Time=0.28 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-10039.293, Time=0.30 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=inf, Time=1.62 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-10046.696, Time=0.47 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-10042.147, Time=0.34 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-10056.084, Time=0.35 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-10043.850, Time=0.64 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0]             : AIC=-10078.957, Time=1.79 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,3)(0,0,0)[0]             : AIC=-10078.238, Time=0.54 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,4)(0,0,0)[0]             : AIC=-10074.604, Time=1.34 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,4)(0,0,0)[0]             : AIC=-10064.937, Time=0.60 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,2)(0,0,0)[0]             : AIC=-10045.506, Time=3.58 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,4)(0,0,0)[0]             : AIC=-10077.976, Time=1.63 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0] intercept   : AIC=-10065.474, Time=1.29 sec

Best model:  ARIMA(3,0,3)(0,0,0)[0]          
Total fit time: 16.248 seconds
BLCKROCK 1 - Best AutoARIMA: (3, 0, 3)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3696.926, Time=0.42 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3528.864, Time=0.06 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3647.385, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3701.922, Time=0.04 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3698.741, Time=0.13 sec
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3701.112, Time=0.10 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3698.948, Time=0.26 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3701.318, Time=0.13 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.241 seconds
BLCKROCK 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-8476.865, Time=1.03 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-8155.722, Time=0.09 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-8467.776, Time=0.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-8429.996, Time=0.12 sec
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-8463.153, Time=0.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-8463.938, Time=0.40 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=-8470.197, Time=0.84 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-8470.847, Time=0.36 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-8466.018, Time=0.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-8479.730, Time=0.65 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-8493.616, Time=0.36 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-8423.238, Time=0.40 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,4)(0,0,0)[0]             : AIC=-8491.869, Time=0.45 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,4)(0,0,0)[0]             : AIC=-8491.131, Time=1.67 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0] intercept   : AIC=-8492.653, Time=3.09 sec

Best model:  ARIMA(0,0,3)(0,0,0)[0]          
Total fit time: 9.901 seconds
HII 1 - Best AutoARIMA: (0, 0, 3)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3363.364, Time=0.17 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3334.149, Time=0.09 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3363.561, Time=0.05 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3368.173, Time=0.07 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3365.340, Time=0.20 sec
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3366.079, Time=0.11 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3364.057, Time=0.16 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3366.184, Time=0.34 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.183 seconds
HII 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-12669.908, Time=0.44 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-12196.515, Time=0.12 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-12573.272, Time=0.07 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-12675.947, Time=0.24 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-12687.277, Time=0.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-12685.327, Time=0.34 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-12685.636, Time=0.45 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-12688.010, Time=0.33 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-12686.369, Time=0.56 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-12684.450, Time=1.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0] intercept   : AIC=-12686.083, Time=1.57 sec

Best model:  ARIMA(0,0,2)(0,0,0)[0]          
Total fit time: 5.601 seconds
LMT 1 - Best AutoARIMA: (0, 0, 2)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-4729.896, Time=0.17 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-4642.682, Time=0.06 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-4711.591, Time=0.09 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-4731.860, Time=0.21 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-4728.657, Time=0.12 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-4729.923, Time=0.19 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-4728.342, Time=0.59 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-4730.007, Time=0.65 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 2.107 seconds
LMT 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
BLCKROCK 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -57.0198
Distribution:                  Normal   AIC:                           122.040
Method:            Maximum Likelihood   BIC:                           142.363
                                        No. Observations:                 1189
Date:                Thu, Mar 13 2025   Df Residuals:                     1188
Time:                        21:46:23   Df Model:                            1
                                  Mean Model                                 
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
mu            -0.0106  9.804e-03     -1.082      0.279 [-2.983e-02,8.603e-03]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega      2.6303e-03  2.664e-03      0.987      0.324 [-2.592e-03,7.852e-03]
alpha[1]       0.0947  5.223e-02      1.813  6.986e-02   [-7.685e-03,  0.197]
beta[1]        0.8831  6.157e-02     14.342  1.192e-46      [  0.762,  1.004]
=============================================================================

Covariance estimator: robust
BLCKROCK 1 - Ljung-Box Test (normalidad residuos):        lb_stat      lb_pvalue
10  545.788606  7.139468e-111
No description has been provided for this image
BLCKROCK 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -276.373
Distribution:                  Normal   AIC:                           560.746
Method:            Maximum Likelihood   BIC:                           577.416
                                        No. Observations:                  477
Date:                Thu, Mar 13 2025   Df Residuals:                      476
Time:                        21:46:23   Df Model:                            1
                                  Mean Model                                 
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
mu             0.0146  1.961e-02      0.743      0.457 [-2.386e-02,5.302e-02]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega          0.0207  1.535e-02      1.349      0.177 [-9.380e-03,5.078e-02]
alpha[1]       0.1653  4.970e-02      3.326  8.825e-04    [6.787e-02,  0.263]
beta[1]        0.7587  8.606e-02      8.816  1.186e-18      [  0.590,  0.927]
=============================================================================

Covariance estimator: robust
BLCKROCK 5 - Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  140.711101  3.012962e-25
No description has been provided for this image
HII 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                157.966
Distribution:                  Normal   AIC:                          -307.933
Method:            Maximum Likelihood   BIC:                          -288.395
                                        No. Observations:                  977
Date:                Thu, Mar 13 2025   Df Residuals:                      976
Time:                        21:46:24   Df Model:                            1
                                  Mean Model                                 
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
mu         1.2825e-03  4.805e-03      0.267      0.790 [-8.136e-03,1.070e-02]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega      8.1377e-04  5.201e-04      1.565      0.118 [-2.056e-04,1.833e-03]
alpha[1]       0.1087  3.174e-02      3.426  6.131e-04    [4.652e-02,  0.171]
beta[1]        0.8913  3.896e-02     22.876 8.117e-116      [  0.815,  0.968]
=============================================================================

Covariance estimator: robust
HII 1 - Ljung-Box Test (normalidad residuos):      lb_stat     lb_pvalue
10  456.7053  7.754572e-92
No description has been provided for this image
HII 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -190.980
Distribution:                  Normal   AIC:                           389.959
Method:            Maximum Likelihood   BIC:                           406.043
                                        No. Observations:                  412
Date:                Thu, Mar 13 2025   Df Residuals:                      411
Time:                        21:46:24   Df Model:                            1
                                  Mean Model                                  
==============================================================================
                  coef    std err          t      P>|t|       95.0% Conf. Int.
------------------------------------------------------------------------------
mu         -2.6599e-03  1.621e-02     -0.164      0.870 [-3.442e-02,2.910e-02]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega      9.1798e-03  5.896e-03      1.557      0.120 [-2.377e-03,2.074e-02]
alpha[1]       0.1965  5.094e-02      3.858  1.144e-04    [9.667e-02,  0.296]
beta[1]        0.7733  3.271e-02     23.637 1.589e-123      [  0.709,  0.837]
=============================================================================

Covariance estimator: robust
HII 5 - Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  38.718779   0.000028
No description has been provided for this image
LMT 1 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                612.566
Distribution:                  Normal   AIC:                          -1217.13
Method:            Maximum Likelihood   BIC:                          -1196.25
                                        No. Observations:                 1367
Date:                Thu, Mar 13 2025   Df Residuals:                     1366
Time:                        21:46:25   Df Model:                            1
                                  Mean Model                                  
==============================================================================
                  coef    std err          t      P>|t|       95.0% Conf. Int.
------------------------------------------------------------------------------
mu         -1.8710e-03  3.673e-03     -0.509      0.610 [-9.070e-03,5.328e-03]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      9.8806e-04  4.120e-04      2.398  1.649e-02 [1.805e-04,1.796e-03]
alpha[1]       0.1549  3.980e-02      3.893  9.908e-05   [7.692e-02,  0.233]
beta[1]        0.8244  3.932e-02     20.969  1.269e-97     [  0.747,  0.902]
============================================================================

Covariance estimator: robust
LMT 1 - Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  387.364788  4.591660e-77
No description has been provided for this image
LMT 5 - GARCH(1,1) Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                  Close   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:               -24.4206
Distribution:                  Normal   AIC:                           56.8412
Method:            Maximum Likelihood   BIC:                           74.0296
                                        No. Observations:                  543
Date:                Thu, Mar 13 2025   Df Residuals:                      542
Time:                        21:46:25   Df Model:                            1
                                  Mean Model                                  
==============================================================================
                  coef    std err          t      P>|t|       95.0% Conf. Int.
------------------------------------------------------------------------------
mu         -4.8921e-03  7.861e-03     -0.622      0.534 [-2.030e-02,1.052e-02]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega      2.1876e-03  1.786e-03      1.225      0.221 [-1.312e-03,5.687e-03]
alpha[1]       0.1629  4.241e-02      3.840  1.229e-04    [7.975e-02,  0.246]
beta[1]        0.8371  4.579e-02     18.282  1.151e-74      [  0.747,  0.927]
=============================================================================

Covariance estimator: robust
LMT 5 - Ljung-Box Test (normalidad residuos):       lb_stat     lb_pvalue
10  94.806292  5.936091e-16
No description has been provided for this image
In [ ]:
# AutoARIMA para comparación con diagnóstico de residuos
auto_arima_results = {}
for key, series in returns_dict.items():
    auto_model = auto_arima(series, seasonal=False, trace=True, suppress_warnings=True)
    auto_arima_results[key] = auto_model
    print(f'{key} - Best AutoARIMA: {auto_model.order}')

    # Ajustar el modelo ARIMA con el orden encontrado por AutoARIMA para obtener residuos
    arima_model = ARIMA(series, order=auto_model.order)
    arima_fit = arima_model.fit()
    residuals = arima_fit.resid
    print(f'{key} - AutoARIMA Ljung-Box Test (normalidad residuos):',
          acorr_ljungbox(residuals, lags=[10], return_df=True))

    # Forecast
    forecast = auto_model.predict(n_periods=forecast_steps)
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series[-50:], label='Observed Returns')
    plt.plot(forecast_index, forecast, label='AutoARIMA Forecast', color='green')
    plt.title(f'{key} - AutoARIMA Forecast')
    plt.legend()
    plt.show()
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-10056.579, Time=5.48 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-9422.857, Time=0.11 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-9896.676, Time=0.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-10035.734, Time=0.19 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-10053.942, Time=0.27 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-10039.293, Time=0.30 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=inf, Time=1.57 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-10046.696, Time=0.44 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-10042.147, Time=0.28 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-10056.084, Time=0.33 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-10043.850, Time=0.62 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0]             : AIC=-10078.957, Time=4.06 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,3)(0,0,0)[0]             : AIC=-10078.238, Time=0.50 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,4)(0,0,0)[0]             : AIC=-10074.604, Time=1.34 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,4)(0,0,0)[0]             : AIC=-10064.937, Time=0.59 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,2)(0,0,0)[0]             : AIC=-10045.506, Time=1.29 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(4,0,4)(0,0,0)[0]             : AIC=-10077.976, Time=1.61 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,3)(0,0,0)[0] intercept   : AIC=-10065.474, Time=1.28 sec

Best model:  ARIMA(3,0,3)(0,0,0)[0]          
Total fit time: 20.349 seconds
BLCKROCK 1 - Best AutoARIMA: (3, 0, 3)
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
BLCKROCK 1 - AutoARIMA Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  37.974302   0.000038
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3696.926, Time=0.88 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3528.864, Time=0.09 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3647.385, Time=0.11 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3701.922, Time=0.10 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3698.741, Time=0.50 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3701.112, Time=0.77 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3698.948, Time=0.61 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3701.318, Time=0.36 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 3.445 seconds
BLCKROCK 5 - Best AutoARIMA: (0, 0, 1)
BLCKROCK 5 - AutoARIMA Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  28.425881   0.001542
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-8476.865, Time=1.05 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-8155.722, Time=0.10 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-8467.776, Time=0.05 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-8429.996, Time=0.11 sec
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-8463.153, Time=0.18 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-8463.938, Time=0.38 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=-8470.197, Time=0.80 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-8470.847, Time=0.33 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-8466.018, Time=0.24 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-8479.730, Time=0.65 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-8493.616, Time=0.35 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-8423.238, Time=0.39 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,4)(0,0,0)[0]             : AIC=-8491.869, Time=0.44 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,4)(0,0,0)[0]             : AIC=-8491.131, Time=0.69 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0] intercept   : AIC=-8492.653, Time=1.90 sec

Best model:  ARIMA(0,0,3)(0,0,0)[0]          
Total fit time: 7.671 seconds
HII 1 - Best AutoARIMA: (0, 0, 3)
HII 1 - AutoARIMA Ljung-Box Test (normalidad residuos):      lb_stat  lb_pvalue
10  45.33919   0.000002
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-3363.364, Time=0.16 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-3334.149, Time=0.06 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-3363.561, Time=0.07 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-3368.173, Time=0.07 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-3365.340, Time=0.19 sec
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-3366.079, Time=0.11 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-3364.057, Time=0.15 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-3366.184, Time=0.45 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.262 seconds
HII 5 - Best AutoARIMA: (0, 0, 1)
HII 5 - AutoARIMA Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  10.961326   0.360539
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-12669.908, Time=1.32 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-12196.515, Time=0.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-12573.272, Time=0.13 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-12675.947, Time=0.39 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-12687.277, Time=0.40 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-12685.327, Time=0.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-12685.636, Time=0.42 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-12688.010, Time=0.31 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,3)(0,0,0)[0]             : AIC=-12686.369, Time=0.54 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-12684.450, Time=1.17 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0] intercept   : AIC=-12686.083, Time=1.53 sec

Best model:  ARIMA(0,0,2)(0,0,0)[0]          
Total fit time: 6.703 seconds
LMT 1 - Best AutoARIMA: (0, 0, 2)
LMT 1 - AutoARIMA Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  28.270945   0.001633
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
Performing stepwise search to minimize aic
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-4729.896, Time=0.17 sec
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-4642.682, Time=0.07 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-4711.591, Time=0.08 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-4731.860, Time=0.23 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-4728.657, Time=0.13 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-4729.923, Time=0.18 sec
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-4728.342, Time=0.30 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-4730.007, Time=0.19 sec

Best model:  ARIMA(0,0,1)(0,0,0)[0]          
Total fit time: 1.359 seconds
LMT 5 - Best AutoARIMA: (0, 0, 1)
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
LMT 5 - AutoARIMA Ljung-Box Test (normalidad residuos):       lb_stat  lb_pvalue
10  18.310571   0.049945
/usr/local/lib/python3.11/dist-packages/sklearn/utils/deprecation.py:151: FutureWarning: 'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be removed in 1.8.
  warnings.warn(
No description has been provided for this image
In [ ]:
!pip install arch
Collecting arch
  Downloading arch-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (13 kB)
Requirement already satisfied: numpy>=1.22.3 in /usr/local/lib/python3.11/dist-packages (from arch) (1.26.4)
Requirement already satisfied: scipy>=1.8 in /usr/local/lib/python3.11/dist-packages (from arch) (1.14.1)
Requirement already satisfied: pandas>=1.4 in /usr/local/lib/python3.11/dist-packages (from arch) (2.2.2)
Requirement already satisfied: statsmodels>=0.12 in /usr/local/lib/python3.11/dist-packages (from arch) (0.14.4)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2025.1)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels>=0.12->arch) (1.0.1)
Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels>=0.12->arch) (24.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas>=1.4->arch) (1.17.0)
Downloading arch-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (985 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 985.3/985.3 kB 12.5 MB/s eta 0:00:00
Installing collected packages: arch
Successfully installed arch-7.2.0
In [ ]:
!pip install arch
Collecting arch
  Downloading arch-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (13 kB)
Requirement already satisfied: numpy>=1.22.3 in /usr/local/lib/python3.11/dist-packages (from arch) (1.26.4)
Requirement already satisfied: scipy>=1.8 in /usr/local/lib/python3.11/dist-packages (from arch) (1.14.1)
Requirement already satisfied: pandas>=1.4 in /usr/local/lib/python3.11/dist-packages (from arch) (2.2.2)
Requirement already satisfied: statsmodels>=0.12 in /usr/local/lib/python3.11/dist-packages (from arch) (0.14.4)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.4->arch) (2025.1)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels>=0.12->arch) (1.0.1)
Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/dist-packages (from statsmodels>=0.12->arch) (24.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas>=1.4->arch) (1.17.0)
Downloading arch-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (985 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 985.3/985.3 kB 36.3 MB/s eta 0:00:00
Installing collected packages: arch
Successfully installed arch-7.2.0
In [ ]:
import arch
print("Librería arch instalada correctamente:", arch.__version__)
Librería arch instalada correctamente: 7.2.0
In [ ]:
import pandas as pd
import numpy as np

# Generar datos de ejemplo (precios simulados)
dates = pd.date_range(start='2024-01-01', periods=1000, freq='1min')
data = pd.DataFrame(
    index=dates,
    data={
        'BLCKROCK 1': np.random.normal(100, 1, 1000).cumsum(),
        'HII 1': np.random.normal(100, 1, 1000).cumsum()
    }
)

# Calcular retornos logarítmicos
returns_dict = {}
for column in data.columns:
    returns = np.log(data[column] / data[column].shift(1)).dropna()
    returns_dict[column] = returns

# Verificar las claves
print("Claves de returns_dict:", returns_dict.keys())
print("Primeros 5 valores de BLCKROCK 1:", returns_dict['BLCKROCK 1'].head())
Claves de returns_dict: dict_keys(['BLCKROCK 1', 'HII 1'])
Primeros 5 valores de BLCKROCK 1: 2024-01-01 00:01:00    0.704463
2024-01-01 00:02:00    0.399723
2024-01-01 00:03:00    0.289378
2024-01-01 00:04:00    0.222853
2024-01-01 00:05:00    0.180718
Freq: min, Name: BLCKROCK 1, dtype: float64
In [ ]:
from arch import arch_model
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.stats.diagnostic import acorr_ljungbox

# Función para ajustar EGARCH(1,1)
def fit_egarch(series, name, p=1, q=1, forecast_steps=10):
    # Reescalar los datos
    scale_factor = 100
    scaled_series = series * scale_factor

    # Ajustar EGARCH(1,1)
    model = arch_model(scaled_series, vol='EGARCH', p=p, q=q, dist='normal', rescale=False)
    result = model.fit(disp='off')
    print(f'{name} - EGARCH({p},{q}) Summary:')
    print(result.summary())

    # Diagnóstico de residuos
    residuals = result.resid / scale_factor
    print(f'{name} - EGARCH Ljung-Box Test (normalidad residuos):',
          acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))

    # Forecast de volatilidad usando simulación
    forecast = result.forecast(horizon=forecast_steps, method='simulation', simulations=1000)
    volatility = np.sqrt(forecast.variance.iloc[-1, :]) / scale_factor  # Desescalamos la volatilidad
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility', color='purple')
    plt.title(f'{name} - EGARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return result

# Función para ajustar ARIMA-GARCH combinado
def fit_arima_garch(series, name, arima_order, garch_p=1, garch_q=1, forecast_steps=10):
    # Ajustar ARIMA primero
    arima_model = ARIMA(series, order=arima_order)
    arima_result = arima_model.fit()
    print(f'{name} - ARIMA{arima_order} Summary (for ARIMA-GARCH):')
    print(arima_result.summary())

    # Obtener residuos del modelo ARIMA
    arima_resid = arima_result.resid

    # Reescalar los residuos para GARCH
    scale_factor = 100
    scaled_resid = arima_resid * scale_factor

    # Ajustar GARCH a los residuos de ARIMA
    garch_model = arch_model(scaled_resid, vol='Garch', p=garch_p, q=garch_q, dist='normal', rescale=False)
    garch_result = garch_model.fit(disp='off')
    print(f'{name} - GARCH({garch_p},{garch_q}) on ARIMA Residuals Summary:')
    print(garch_result.summary())

    # Diagnóstico de residuos
    residuals = garch_result.resid / scale_factor
    print(f'{name} - ARIMA-GARCH Ljung-Box Test (normalidad residuos):',
          acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))

    # Forecast de volatilidad usando simulación
    forecast = garch_result.forecast(horizon=forecast_steps, method='simulation', simulations=1000)
    volatility = np.sqrt(forecast.variance.iloc[-1, :]) / scale_factor  # Desescalamos la volatilidad
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility (ARIMA-GARCH)', color='orange')
    plt.title(f'{name} - ARIMA-GARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return arima_result, garch_result

# Ajustar EGARCH para BLCKROCK 1 y HII 1
egarch_results = {}
for key in ['BLCKROCK 1', 'HII 1']:
    egarch_results[key] = fit_egarch(returns_dict[key], key, p=1, q=1)

# Ajustar ARIMA-GARCH para BLCKROCK 1 y HII 1 usando los órdenes de AutoARIMA
arima_garch_results = {}
arima_orders = {'BLCKROCK 1': (3,0,3), 'HII 1': (0,0,3)}  # Órdenes de AutoARIMA
for key in ['BLCKROCK 1', 'HII 1']:
    arima_garch_results[key] = fit_arima_garch(returns_dict[key], key, arima_orders[key])
BLCKROCK 1 - EGARCH(1,1) Summary:
                     Constant Mean - EGARCH Model Results                     
==============================================================================
Dep. Variable:             BLCKROCK 1   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                     EGARCH   Log-Likelihood:                1132.64
Distribution:                  Normal   AIC:                          -2257.28
Method:            Maximum Likelihood   BIC:                          -2237.65
                                        No. Observations:                  999
Date:                Fri, Mar 14 2025   Df Residuals:                      998
Time:                        06:25:15   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu             0.1349  1.919e-04    703.166      0.000 [  0.135,  0.135]
                            Volatility Model                            
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
omega         -0.2216  3.182e-02     -6.965  3.281e-12 [ -0.284, -0.159]
alpha[1]       0.8647      0.114      7.576  3.559e-14 [  0.641,  1.088]
beta[1]        0.9919  2.850e-03    347.981      0.000 [  0.986,  0.998]
========================================================================

Covariance estimator: robust
BLCKROCK 1 - EGARCH Ljung-Box Test (normalidad residuos):         lb_stat  lb_pvalue
10  1570.407401        0.0
No description has been provided for this image
HII 1 - EGARCH(1,1) Summary:
                     Constant Mean - EGARCH Model Results                     
==============================================================================
Dep. Variable:                  HII 1   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                     EGARCH   Log-Likelihood:                1122.29
Distribution:                  Normal   AIC:                          -2236.58
Method:            Maximum Likelihood   BIC:                          -2216.95
                                        No. Observations:                  999
Date:                Fri, Mar 14 2025   Df Residuals:                      998
Time:                        06:25:16   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu             0.1369  3.237e-04    422.975      0.000 [  0.136,  0.138]
                            Volatility Model                            
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
omega         -0.2011  3.204e-02     -6.275  3.506e-10 [ -0.264, -0.138]
alpha[1]       0.7533      0.135      5.563  2.647e-08 [  0.488,  1.019]
beta[1]        0.9916  2.466e-03    402.041      0.000 [  0.987,  0.996]
========================================================================

Covariance estimator: robust
HII 1 - EGARCH Ljung-Box Test (normalidad residuos):        lb_stat  lb_pvalue
10  1573.69187        0.0
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
BLCKROCK 1 - ARIMA(3, 0, 3) Summary (for ARIMA-GARCH):
                               SARIMAX Results                                
==============================================================================
Dep. Variable:             BLCKROCK 1   No. Observations:                  999
Model:                 ARIMA(3, 0, 3)   Log Likelihood                3400.076
Date:                Fri, 14 Mar 2025   AIC                          -6784.152
Time:                        06:25:18   BIC                          -6744.898
Sample:                    01-01-2024   HQIC                         -6769.232
                         - 01-01-2024                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0395      0.015      2.610      0.009       0.010       0.069
ar.L1         -0.9296      0.020    -46.298      0.000      -0.969      -0.890
ar.L2          0.9593      0.007    146.382      0.000       0.947       0.972
ar.L3          0.9703      0.020     49.442      0.000       0.932       1.009
ma.L1          2.9038      0.043     67.848      0.000       2.820       2.988
ma.L2          2.8750      0.086     33.372      0.000       2.706       3.044
ma.L3          0.9705      0.044     22.025      0.000       0.884       1.057
sigma2      6.392e-05   3.65e-06     17.497      0.000    5.68e-05    7.11e-05
===================================================================================
Ljung-Box (L1) (Q):                  95.89   Jarque-Bera (JB):          13435277.17
Prob(Q):                              0.00   Prob(JB):                         0.00
Heteroskedasticity (H):               0.00   Skew:                           -21.50
Prob(H) (two-sided):                  0.00   Kurtosis:                       569.50
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 1 - GARCH(1,1) on ARIMA Residuals Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                   None   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                1378.75
Distribution:                  Normal   AIC:                          -2749.51
Method:            Maximum Likelihood   BIC:                          -2729.88
                                        No. Observations:                  999
Date:                Fri, Mar 14 2025   Df Residuals:                      998
Time:                        06:25:18   Df Model:                            1
                                   Mean Model                                  
===============================================================================
                  coef    std err          t      P>|t|        95.0% Conf. Int.
-------------------------------------------------------------------------------
mu         -1.1891e-03  3.076e-04     -3.865  1.109e-04 [-1.792e-03,-5.861e-04]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega      4.0027e-03  1.004e-04     39.873      0.000  [3.806e-03,4.200e-03]
alpha[1]       1.0000      0.125      7.992  1.328e-15      [  0.755,  1.245]
beta[1]    4.7508e-05  2.467e-03  1.926e-02      0.985 [-4.788e-03,4.883e-03]
=============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Positive directional derivative for linesearch.
See convergence_flag.

BLCKROCK 1 - ARIMA-GARCH Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  219.358142  1.456299e-41
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
HII 1 - ARIMA(0, 0, 3) Summary (for ARIMA-GARCH):
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  HII 1   No. Observations:                  999
Model:                 ARIMA(0, 0, 3)   Log Likelihood                3270.261
Date:                Fri, 14 Mar 2025   AIC                          -6530.523
Time:                        06:25:21   BIC                          -6505.989
Sample:                    01-01-2024   HQIC                         -6521.198
                         - 01-01-2024                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0090      0.004      2.552      0.011       0.002       0.016
ma.L1          2.6035      0.037     69.979      0.000       2.531       2.676
ma.L2          2.5371      0.078     32.611      0.000       2.385       2.690
ma.L3          0.9080      0.040     22.488      0.000       0.829       0.987
sigma2        8.3e-05   3.72e-06     22.334      0.000    7.57e-05    9.03e-05
===================================================================================
Ljung-Box (L1) (Q):                 345.48   Jarque-Bera (JB):           1641161.25
Prob(Q):                              0.00   Prob(JB):                         0.00
Heteroskedasticity (H):               0.01   Skew:                            11.10
Prob(H) (two-sided):                  0.00   Kurtosis:                       200.32
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 1 - GARCH(1,1) on ARIMA Residuals Summary:
                     Constant Mean - GARCH Model Results                      
==============================================================================
Dep. Variable:                   None   R-squared:                       0.000
Mean Model:             Constant Mean   Adj. R-squared:                  0.000
Vol Model:                      GARCH   Log-Likelihood:                1131.67
Distribution:                  Normal   AIC:                          -2255.34
Method:            Maximum Likelihood   BIC:                          -2235.71
                                        No. Observations:                  999
Date:                Fri, Mar 14 2025   Df Residuals:                      998
Time:                        06:25:21   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu            -0.1073  8.606e-04   -124.709      0.000 [ -0.109, -0.106]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      1.6579e-05  4.790e-08    346.108      0.000 [1.649e-05,1.667e-05]
alpha[1]       0.2611  2.748e-02      9.503  2.036e-21     [  0.207,  0.315]
beta[1]        0.7389  2.276e-02     32.461 3.746e-231     [  0.694,  0.783]
============================================================================

Covariance estimator: robust
HII 1 - ARIMA-GARCH Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  410.178187  6.412226e-82
No description has been provided for this image
In [ ]:
# Modificar la función fit_egarch para usar distribución Student-t
def fit_egarch(series, name, p=1, q=1, forecast_steps=10):
    scale_factor = 100
    scaled_series = series * scale_factor
    model = arch_model(scaled_series, vol='EGARCH', p=p, q=q, dist='t', rescale=False)
    result = model.fit(disp='off')
    print(f'{name} - EGARCH({p},{q}) with Student-t Distribution Summary:')
    print(result.summary())
    residuals = result.resid / scale_factor
    print(f'{name} - EGARCH Ljung-Box Test (normalidad residuos):',
          acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))
    forecast = result.forecast(horizon=forecast_steps, method='simulation', simulations=1000)
    volatility = np.sqrt(forecast.variance.iloc[-1, :]) / scale_factor
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility', color='purple')
    plt.title(f'{name} - EGARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return result

# Modificar la función fit_arima_garch para usar distribución Student-t
def fit_arima_garch(series, name, arima_order, garch_p=1, garch_q=1, forecast_steps=10):
    arima_model = ARIMA(series, order=arima_order)
    arima_result = arima_model.fit()
    print(f'{name} - ARIMA{arima_order} Summary (for ARIMA-GARCH):')
    print(arima_result.summary())
    arima_resid = arima_result.resid
    scale_factor = 100
    scaled_resid = arima_resid * scale_factor
    garch_model = arch_model(scaled_resid, vol='Garch', p=garch_p, q=garch_q, dist='t', rescale=False)
    garch_result = garch_model.fit(disp='off')
    print(f'{name} - GARCH({garch_p},{garch_q}) on ARIMA Residuals with Student-t Distribution Summary:')
    print(garch_result.summary())
    residuals = garch_result.resid / scale_factor
    print(f'{name} - ARIMA-GARCH Ljung-Box Test (normalidad residuos):',
          acorr_ljungbox(residuals.dropna(), lags=[10], return_df=True))
    forecast = garch_result.forecast(horizon=forecast_steps, method='simulation', simulations=1000)
    volatility = np.sqrt(forecast.variance.iloc[-1, :]) / scale_factor
    forecast_index = pd.date_range(start=series.index[-1], periods=forecast_steps + 1, freq='1min')[1:]
    plt.figure(figsize=(10, 6))
    plt.plot(series.index[-50:], series[-50:], label='Returns')
    plt.plot(forecast_index, volatility, label='Forecasted Volatility (ARIMA-GARCH)', color='orange')
    plt.title(f'{name} - ARIMA-GARCH Volatility Forecast')
    plt.legend()
    plt.show()
    return arima_result, garch_result

# Ajustar los modelos con distribución Student-t
egarch_results = {}
for key in ['BLCKROCK 1', 'HII 1']:
    egarch_results[key] = fit_egarch(returns_dict[key], key, p=1, q=1)

arima_garch_results = {}
arima_orders = {'BLCKROCK 1': (3,0,3), 'HII 1': (0,0,3)}
for key in ['BLCKROCK 1', 'HII 1']:
    arima_garch_results[key] = fit_arima_garch(returns_dict[key], key, arima_orders[key])
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
BLCKROCK 1 - EGARCH(1,1) with Student-t Distribution Summary:
                        Constant Mean - EGARCH Model Results                        
====================================================================================
Dep. Variable:                   BLCKROCK 1   R-squared:                       0.000
Mean Model:                   Constant Mean   Adj. R-squared:                  0.000
Vol Model:                           EGARCH   Log-Likelihood:               -3828.91
Distribution:      Standardized Student's t   AIC:                           7667.81
Method:                  Maximum Likelihood   BIC:                           7692.35
                                              No. Observations:                  999
Date:                      Fri, Mar 14 2025   Df Residuals:                      998
Time:                              06:27:26   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu            -1.9095     20.680 -9.234e-02      0.926 [-42.441, 38.622]
                               Volatility Model                              
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
omega          5.3862     55.938  9.629e-02      0.923 [-1.042e+02,1.150e+02]
alpha[1]      11.6830     84.511      0.138      0.890 [-1.540e+02,1.773e+02]
beta[1]        0.6445      7.328  8.794e-02      0.930      [-13.719, 15.008]
                                 Distribution                                
=============================================================================
                 coef    std err          t      P>|t|       95.0% Conf. Int.
-----------------------------------------------------------------------------
nu             2.0500     81.472  2.516e-02      0.980 [-1.576e+02,1.617e+02]
=============================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Inequality constraints incompatible.
See convergence_flag.

BLCKROCK 1 - EGARCH Ljung-Box Test (normalidad residuos):         lb_stat  lb_pvalue
10  1570.407401        0.0
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 9. The message is:
Iteration limit reached
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
HII 1 - EGARCH(1,1) with Student-t Distribution Summary:
                        Constant Mean - EGARCH Model Results                        
====================================================================================
Dep. Variable:                        HII 1   R-squared:                       0.000
Mean Model:                   Constant Mean   Adj. R-squared:                  0.000
Vol Model:                           EGARCH   Log-Likelihood:               -634.342
Distribution:      Standardized Student's t   AIC:                           1278.68
Method:                  Maximum Likelihood   BIC:                           1303.22
                                              No. Observations:                  999
Date:                      Fri, Mar 14 2025   Df Residuals:                      998
Time:                              06:27:28   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu             0.6466  9.179e-03     70.449      0.000 [  0.629,  0.665]
                            Volatility Model                            
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
omega         -0.7897      0.314     -2.515  1.191e-02 [ -1.405, -0.174]
alpha[1]       2.5500      0.571      4.464  8.045e-06 [  1.430,  3.670]
beta[1]        0.8565  7.420e-02     11.544  7.893e-31 [  0.711,  1.002]
                              Distribution                              
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
nu            10.5904      0.447     23.684 5.328e-124 [  9.714, 11.467]
========================================================================

Covariance estimator: robust
WARNING: The optimizer did not indicate successful convergence. The message was Iteration limit reached.
See convergence_flag.

HII 1 - EGARCH Ljung-Box Test (normalidad residuos):        lb_stat  lb_pvalue
10  1573.69187        0.0
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
  warn('Non-stationary starting autoregressive parameters'
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
BLCKROCK 1 - ARIMA(3, 0, 3) Summary (for ARIMA-GARCH):
                               SARIMAX Results                                
==============================================================================
Dep. Variable:             BLCKROCK 1   No. Observations:                  999
Model:                 ARIMA(3, 0, 3)   Log Likelihood                3400.076
Date:                Fri, 14 Mar 2025   AIC                          -6784.152
Time:                        06:27:40   BIC                          -6744.898
Sample:                    01-01-2024   HQIC                         -6769.232
                         - 01-01-2024                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0395      0.015      2.610      0.009       0.010       0.069
ar.L1         -0.9296      0.020    -46.298      0.000      -0.969      -0.890
ar.L2          0.9593      0.007    146.382      0.000       0.947       0.972
ar.L3          0.9703      0.020     49.442      0.000       0.932       1.009
ma.L1          2.9038      0.043     67.848      0.000       2.820       2.988
ma.L2          2.8750      0.086     33.372      0.000       2.706       3.044
ma.L3          0.9705      0.044     22.025      0.000       0.884       1.057
sigma2      6.392e-05   3.65e-06     17.497      0.000    5.68e-05    7.11e-05
===================================================================================
Ljung-Box (L1) (Q):                  95.89   Jarque-Bera (JB):          13435277.17
Prob(Q):                              0.00   Prob(JB):                         0.00
Heteroskedasticity (H):               0.00   Skew:                           -21.50
Prob(H) (two-sided):                  0.00   Kurtosis:                       569.50
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
BLCKROCK 1 - GARCH(1,1) on ARIMA Residuals with Student-t Distribution Summary:
                        Constant Mean - GARCH Model Results                         
====================================================================================
Dep. Variable:                         None   R-squared:                       0.000
Mean Model:                   Constant Mean   Adj. R-squared:                  0.000
Vol Model:                            GARCH   Log-Likelihood:                2527.83
Distribution:      Standardized Student's t   AIC:                          -5045.66
Method:                  Maximum Likelihood   BIC:                          -5021.12
                                              No. Observations:                  999
Date:                      Fri, Mar 14 2025   Df Residuals:                      998
Time:                              06:27:40   Df Model:                            1
                                  Mean Model                                  
==============================================================================
                  coef    std err          t      P>|t|       95.0% Conf. Int.
------------------------------------------------------------------------------
mu         -1.7306e-04  9.310e-05     -1.859  6.305e-02 [-3.555e-04,9.412e-06]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      6.0026e-06  4.881e-09   1229.894      0.000 [5.993e-06,6.012e-06]
alpha[1]       0.8565  3.536e-02     24.221 1.345e-129     [  0.787,  0.926]
beta[1]        0.1435  2.514e-02      5.708  1.140e-08   [9.422e-02,  0.193]
                                Distribution                                
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
nu           375.8881    104.009      3.614  3.015e-04 [1.720e+02,5.797e+02]
============================================================================

Covariance estimator: robust
BLCKROCK 1 - ARIMA-GARCH Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  219.358142  1.456299e-41
No description has been provided for this image
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/statespace/sarimax.py:978: UserWarning: Non-invertible starting MA parameters found. Using zeros as starting parameters.
  warn('Non-invertible starting MA parameters found.'
/usr/local/lib/python3.11/dist-packages/statsmodels/base/model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  warnings.warn("Maximum Likelihood optimization failed to "
HII 1 - ARIMA(0, 0, 3) Summary (for ARIMA-GARCH):
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                  HII 1   No. Observations:                  999
Model:                 ARIMA(0, 0, 3)   Log Likelihood                3270.261
Date:                Fri, 14 Mar 2025   AIC                          -6530.523
Time:                        06:27:42   BIC                          -6505.989
Sample:                    01-01-2024   HQIC                         -6521.198
                         - 01-01-2024                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0090      0.004      2.552      0.011       0.002       0.016
ma.L1          2.6035      0.037     69.979      0.000       2.531       2.676
ma.L2          2.5371      0.078     32.611      0.000       2.385       2.690
ma.L3          0.9080      0.040     22.488      0.000       0.829       0.987
sigma2        8.3e-05   3.72e-06     22.334      0.000    7.57e-05    9.03e-05
===================================================================================
Ljung-Box (L1) (Q):                 345.48   Jarque-Bera (JB):           1641161.25
Prob(Q):                              0.00   Prob(JB):                         0.00
Heteroskedasticity (H):               0.01   Skew:                            11.10
Prob(H) (two-sided):                  0.00   Kurtosis:                       200.32
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
HII 1 - GARCH(1,1) on ARIMA Residuals with Student-t Distribution Summary:
                        Constant Mean - GARCH Model Results                         
====================================================================================
Dep. Variable:                         None   R-squared:                       0.000
Mean Model:                   Constant Mean   Adj. R-squared:                  0.000
Vol Model:                            GARCH   Log-Likelihood:                1130.82
Distribution:      Standardized Student's t   AIC:                          -2251.64
Method:                  Maximum Likelihood   BIC:                          -2227.11
                                              No. Observations:                  999
Date:                      Fri, Mar 14 2025   Df Residuals:                      998
Time:                              06:27:43   Df Model:                            1
                               Mean Model                               
========================================================================
                 coef    std err          t      P>|t|  95.0% Conf. Int.
------------------------------------------------------------------------
mu            -0.1073  8.658e-04   -123.941      0.000 [ -0.109, -0.106]
                              Volatility Model                              
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
omega      1.7281e-05  1.221e-06     14.154  1.772e-45 [1.489e-05,1.967e-05]
alpha[1]       0.2611  2.738e-02      9.536  1.488e-21     [  0.207,  0.315]
beta[1]        0.7389  2.276e-02     32.470 2.789e-231     [  0.694,  0.783]
                                Distribution                                
============================================================================
                 coef    std err          t      P>|t|      95.0% Conf. Int.
----------------------------------------------------------------------------
nu           233.0322     18.133     12.851  8.490e-38 [1.975e+02,2.686e+02]
============================================================================

Covariance estimator: robust
HII 1 - ARIMA-GARCH Ljung-Box Test (normalidad residuos):        lb_stat     lb_pvalue
10  410.178187  6.412226e-82
No description has been provided for this image
In [ ]:
from google.colab import drive
import os

# 1. Montar Google Drive
drive.mount('/content/drive')

# 2. Definir la ruta del archivo
input_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/tarea modelos de volatilidad.ipynb"
output_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/tarea modelos de volatilidad.html"

# 3. Convertir a HTML usando nbconvert
!jupyter nbconvert --to html "$input_path" --output "$output_path"

print(f"Archivo convertido y guardado en: {output_path}")
Mounted at /content/drive
[NbConvertApp] Converting notebook /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/tarea modelos de volatilidad.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 51 image(s).
[NbConvertApp] Writing 4032703 bytes to /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/tarea modelos de volatilidad.html
Archivo convertido y guardado en: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/tarea modelos de volatilidad.html
In [ ]:
# Importar bibliotecas necesarias
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import warnings
warnings.filterwarnings("ignore")

# Definir las 14 acciones estadounidenses (incluyendo diversificación)
tickers = ['AAPL', 'NVDA', 'MSFT', 'F', 'AMZN', 'TSLA', 'SBUX', 'NKE', 'GOOGL', 'JPM', 'LMT', 'HII', 'KTOS', 'BLK']

# Descargar datos de yfinance (1 año de datos diarios)
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
print("Datos descargados:")
print(data.head())

# Calcular retornos diarios
returns = data.pct_change().dropna()
print("\nRetornos diarios:")
print(returns.head())

# --- Métricas de Riesgo ---
# Calcular retorno esperado (media) y riesgo (desviación estándar)
mean_returns = returns.mean() * 252  # Anualizado
std_dev = returns.std() * np.sqrt(252)  # Anualizado

# Calcular VaR (Value at Risk) al 95%
var_95 = returns.quantile(0.05)

# Mostrar métricas de riesgo
risk_metrics = pd.DataFrame({
    'Retorno Esperado Anual (%)': mean_returns * 100,
    'Riesgo Anual (%)': std_dev * 100,
    'VaR 95% (Diario)': var_95
})
print("\nMétricas de Riesgo:")
print(risk_metrics)

# Visualizar métricas de riesgo por sector
sectors = ['Tech', 'Tech', 'Tech', 'Auto', 'Ecom', 'Auto', 'Cons', 'Cons', 'Tech', 'Fin', 'Def', 'Def', 'Def', 'Fin']
risk_metrics['Sector'] = sectors
plt.figure(figsize=(12, 6))
for sector in set(sectors):
    sector_data = risk_metrics[risk_metrics['Sector'] == sector]
    plt.bar(sector_data.index, sector_data['Retorno Esperado Anual (%)'], label=f'Retorno {sector}', alpha=0.5)
plt.title('Métricas de Riesgo por Sector')
plt.xlabel('Acción')
plt.ylabel('Retorno Esperado Anual (%)')
plt.legend()
plt.xticks(rotation=45)
plt.show()

# --- Perfil de Riesgo del Cliente (Simulado) ---
def assess_risk_profile():
    print("\nCuestionario de Perfil de Riesgo (Simulado):")
    print("1. ¿Cuál es su horizonte de inversión? (1: Corto plazo, 2: Mediano plazo, 3: Largo plazo)")
    print("2. ¿Qué nivel de pérdida está dispuesto a tolerar? (1: <5%, 2: 5-10%, 3: >10%)")
    score = 2 + 3  # Ejemplo: horizonte largo plazo y tolerancia alta
    if score >= 5:
        return "Agresivo"
    elif score >= 3:
        return "Moderado"
    else:
        return "Conservador"

risk_profile = assess_risk_profile()
print(f"Perfil de Riesgo del Cliente: {risk_profile}")

# --- Visualización de Precios ---
plt.figure(figsize=(14, 7))
for ticker in tickers:
    plt.plot(data.index, data[ticker], label=ticker)
plt.title('Precios de Cierre Ajustados (1 Año) - Diversificación por Sector')
plt.xlabel('Fecha')
plt.ylabel('Precio (USD)')
plt.legend()
plt.show()

# --- Entregable A: Identificación de Variables y Modelado ---
# Ejemplo con AAPL: Variables dependientes e independientes
data_full = yf.download('AAPL', start='2023-03-12', end='2024-03-12', interval='1d')
data_full['Returns'] = data_full['Close'].pct_change()

# Indicadores Técnicos
exp1 = data_full['Close'].ewm(span=12, adjust=False).mean()
exp2 = data_full['Close'].ewm(span=26, adjust=False).mean()
data_full['MACD'] = exp1 - exp2
data_full['Signal Line'] = data_full['MACD'].ewm(span=9, adjust=False).mean()

delta = data_full['Close'].diff()
gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
rs = gain / loss
data_full['RSI'] = 100 - (100 / (1 + rs))

# Regresión Lineal
features = data_full[['MACD', 'RSI', 'Volume']].dropna()
target = data_full['Returns'].loc[features.index]
model = LinearRegression()
model.fit(features, target)
predictions = model.predict(features)
r2 = r2_score(target, predictions)
print(f"\nBondad de Ajuste (R²) para el modelo de AAPL: {r2:.4f}")

# Visualización de regresión
plt.figure(figsize=(10, 5))
plt.plot(target.index, target, label='Retornos Reales')
plt.plot(target.index, predictions, label='Retornos Predichos')
plt.title('Regresión Lineal: Retornos de AAPL')
plt.xlabel('Fecha')
plt.ylabel('Retorno Diario')
plt.legend()
plt.show()

# --- Pronósticos con ARIMA ---
arima_model = ARIMA(data['AAPL'], order=(0, 1, 0))
arima_fit = arima_model.fit()
forecast = arima_fit.forecast(steps=30)
plt.figure(figsize=(12, 6))
plt.plot(data['AAPL'].tail(50), label='Precio Histórico (AAPL)')
plt.plot(forecast.index, forecast, label='Pronóstico ARIMA', color='red')
plt.title('Pronóstico ARIMA para AAPL')
plt.xlabel('Fecha')
plt.ylabel('Precio (USD)')
plt.legend()
plt.show()
[*********************100%***********************]  14 of 14 completed
Datos descargados:
Ticker           AAPL       AMZN         BLK         F      GOOGL         HII  \
Date                                                                            
2019-11-29  64.702400  90.040001  431.485291  7.030434  64.894241  223.090698   
2019-12-02  63.954300  89.080002  426.942993  6.991636  64.136368  221.645813   
2019-12-03  62.814003  88.498001  423.507904  6.898518  64.428963  220.918915   
2019-12-04  63.368435  88.034500  424.318665  6.945076  65.633209  225.599319   
2019-12-05  64.298088  87.024002  430.384308  6.929556  66.032303  223.409805   

Ticker             JPM       KTOS         LMT        MSFT        NKE  \
Date                                                                   
2019-11-29  113.444168  18.059999  339.589874  144.503052  87.820206   
2019-12-02  113.194496  17.700001  333.154602  142.756149  87.885956   
2019-12-03  111.739426  17.200001  331.417633  142.527039  86.852646   
2019-12-04  113.943550  17.160000  333.988373  143.042542  88.036240   
2019-12-05  114.563461  17.190001  335.169495  143.118896  89.980736   

Ticker          NVDA       SBUX       TSLA  
Date                                        
2019-11-29  5.395878  76.361115  21.996000  
2019-12-02  5.209412  75.592407  22.324667  
2019-12-03  5.169827  75.815865  22.413334  
2019-12-04  5.213643  76.334297  22.202000  
2019-12-05  5.196714  75.485153  22.024668  

Retornos diarios:
Ticker          AAPL      AMZN       BLK         F     GOOGL       HII  \
Date                                                                     
2019-12-02 -0.011562 -0.010662 -0.010527 -0.005519 -0.011679 -0.006477   
2019-12-03 -0.017830 -0.006533 -0.008046 -0.013319  0.004562 -0.003280   
2019-12-04  0.008827 -0.005237  0.001914  0.006749  0.018691  0.021186   
2019-12-05  0.014671 -0.011478  0.014295 -0.002235  0.006081 -0.009705   
2019-12-06  0.019316  0.006389  0.011136  0.010078  0.009367  0.005476   

Ticker           JPM      KTOS       LMT      MSFT       NKE      NVDA  \
Date                                                                     
2019-12-02 -0.002201 -0.019933 -0.018950 -0.012089  0.000749 -0.034557   
2019-12-03 -0.012855 -0.028249 -0.005214 -0.001605 -0.011757 -0.007599   
2019-12-04  0.019726 -0.002326  0.007757  0.003617  0.013628  0.008475   
2019-12-05  0.005441  0.001748  0.003536  0.000534  0.022087 -0.003247   
2019-12-06  0.014880  0.012216  0.002384  0.012139  0.012632  0.016432   

Ticker          SBUX      TSLA  
Date                            
2019-12-02 -0.010067  0.014942  
2019-12-03  0.002956  0.003972  
2019-12-04  0.006838 -0.009429  
2019-12-05 -0.011124 -0.007987  
2019-12-06  0.022143  0.016709  

Métricas de Riesgo:
        Retorno Esperado Anual (%)  Riesgo Anual (%)  VaR 95% (Diario)
Ticker                                                                
AAPL                     28.293378         31.438080         -0.030380
AMZN                     21.149237         35.497342         -0.033104
BLK                      19.049208         31.167133         -0.027187
F                        15.718072         43.860175         -0.039144
GOOGL                    22.856325         32.276298         -0.031118
HII                       3.047011         32.901367         -0.028268
JPM                      18.496011         32.072060         -0.028855
KTOS                     21.338297         49.630111         -0.043072
LMT                       9.566029         26.120065         -0.021305
MSFT                     22.969305         30.163979         -0.028356
NKE                       2.781204         34.495504         -0.030152
NVDA                     71.786184         54.203314         -0.051256
SBUX                     10.648089         33.638328         -0.028716
TSLA                     66.999703         66.857715         -0.062467
No description has been provided for this image
Cuestionario de Perfil de Riesgo (Simulado):
1. ¿Cuál es su horizonte de inversión? (1: Corto plazo, 2: Mediano plazo, 3: Largo plazo)
2. ¿Qué nivel de pérdida está dispuesto a tolerar? (1: <5%, 2: 5-10%, 3: >10%)
Perfil de Riesgo del Cliente: Agresivo
No description has been provided for this image
[*********************100%***********************]  1 of 1 completed
Bondad de Ajuste (R²) para el modelo de AAPL: 0.1273
No description has been provided for this image
No description has been provided for this image
In [ ]:
# Importar bibliotecas necesarias
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import warnings
warnings.filterwarnings("ignore")

# Definir las 14 acciones estadounidenses
tickers = ['AAPL', 'NVDA', 'MSFT', 'F', 'AMZN', 'TSLA', 'SBUX', 'NKE', 'GOOGL', 'JPM', 'LMT', 'HII', 'KTOS', 'BLK']

# Descargar datos de yfinance (1 año de datos diarios)
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
print("Datos descargados:")
print(data.head())

# Calcular retornos diarios
returns = data.pct_change().dropna()
print("\nRetornos diarios:")
print(returns.head())

# --- Métricas de Riesgo ---
mean_returns = returns.mean() * 252  # Anualizado
std_dev = returns.std() * np.sqrt(252)  # Anualizado
var_95 = returns.quantile(0.05)

risk_metrics = pd.DataFrame({
    'Retorno Esperado Anual (%)': mean_returns * 100,
    'Riesgo Anual (%)': std_dev * 100,
    'VaR 95% (Diario)': var_95
})
print("\nMétricas de Riesgo:")
print(risk_metrics)

# Visualizar métricas de riesgo por sector
sectors = ['Tech', 'Tech', 'Tech', 'Auto', 'Ecom', 'Auto', 'Cons', 'Cons', 'Tech', 'Fin', 'Def', 'Def', 'Def', 'Fin']
risk_metrics['Sector'] = sectors
plt.figure(figsize=(12, 6))
for sector in set(sectors):
    sector_data = risk_metrics[risk_metrics['Sector'] == sector]
    plt.bar(sector_data.index, sector_data['Retorno Esperado Anual (%)'], label=f'Retorno {sector}', alpha=0.5)
plt.title('Métricas de Riesgo por Sector')
plt.xlabel('Acción')
plt.ylabel('Retorno Esperado Anual (%)')
plt.legend()
plt.xticks(rotation=45)
plt.show()

# --- Perfil de Riesgo del Cliente ---
def assess_risk_profile():
    score = 2 + 3  # Ejemplo: horizonte largo plazo y tolerancia alta
    if score >= 5:
        return "Agresivo"
    elif score >= 3:
        return "Moderado"
    else:
        return "Conservador"
risk_profile = assess_risk_profile()
print(f"Perfil de Riesgo del Cliente: {risk_profile}")

# --- Visualización de Precios ---
plt.figure(figsize=(14, 7))
for ticker in tickers:
    plt.plot(data.index, data[ticker], label=ticker)
plt.title('Precios de Cierre Ajustados (1 Año) - Diversificación por Sector')
plt.xlabel('Fecha')
plt.ylabel('Precio (USD)')
plt.legend()
plt.show()

# --- Regresión Lineal para Todas las Acciones ---
r2_scores = {}
for ticker in tickers:
    data_full = yf.download(ticker, start='2023-03-12', end='2024-03-12', interval='1d')
    data_full['Returns'] = data_full['Close'].pct_change()
    exp1 = data_full['Close'].ewm(span=12, adjust=False).mean()
    exp2 = data_full['Close'].ewm(span=26, adjust=False).mean()
    data_full['MACD'] = exp1 - exp2
    data_full['Signal Line'] = data_full['MACD'].ewm(span=9, adjust=False).mean()
    delta = data_full['Close'].diff()
    gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
    rs = gain / loss
    data_full['RSI'] = 100 - (100 / (1 + rs))
    features = data_full[['MACD', 'RSI', 'Volume']].dropna()
    target = data_full['Returns'].loc[features.index]
    if len(features) > 0 and len(target) > 0:
        model = LinearRegression()
        model.fit(features, target)
        predictions = model.predict(features)
        r2 = r2_score(target, predictions)
        r2_scores[ticker] = r2
        plt.figure(figsize=(10, 5))
        plt.plot(target.index, target, label='Retornos Reales')
        plt.plot(target.index, predictions, label='Retornos Predichos')
        plt.title(f'Regresión Lineal: Retornos de {ticker} (R² = {r2:.4f})')
        plt.xlabel('Fecha')
        plt.ylabel('Retorno Diario')
        plt.legend()
        plt.show()
print("\nR² Scores por Acción:")
for ticker, r2 in r2_scores.items():
    print(f"{ticker}: {r2:.4f}")

# --- Pronóstico ARIMA para Todas las Acciones ---
for ticker in tickers:
    arima_model = ARIMA(data[ticker], order=(0, 1, 0))
    arima_fit = arima_model.fit()
    forecast = arima_fit.forecast(steps=30)
    plt.figure(figsize=(12, 6))
    plt.plot(data[ticker].tail(50), label=f'Precio Histórico ({ticker})')
    plt.plot(forecast.index, forecast, label='Pronóstico ARIMA', color='red')
    plt.title(f'Pronóstico ARIMA para {ticker}')
    plt.xlabel('Fecha')
    plt.ylabel('Precio (USD)')
    plt.legend()
    plt.show()
Output hidden; open in https://colab.research.google.com to view.
In [ ]:
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.stattools import adfuller, kpss

for ticker in tickers[:14]:  # Limitar a 2 para prueba (puedes quitar el límite)
    series = data[ticker].dropna()

    # AR(1)
    ar_model = AutoReg(series, lags=1)
    ar_fit = ar_model.fit()
    ar_forecast = ar_fit.forecast(steps=30)

    # ARIMA(1,1,1)
    arima_model = ARIMA(series, order=(1, 1, 1))
    arima_fit = arima_model.fit()
    arima_forecast = arima_fit.forecast(steps=30)

    # Pruebas de raíz unitaria
    adf_result = adfuller(series)
    kpss_result = kpss(series)
    print(f"\n{ticker} - ADF p-value: {adf_result[1]:.4f}, KPSS p-value: {kpss_result[1]:.4f}")

    # Visualización
    plt.figure(figsize=(12, 6))
    plt.plot(series.tail(50), label=f'Histórico ({ticker})')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:], ar_forecast, label='AR Pronóstico', color='blue')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:], arima_forecast, label='ARIMA Pronóstico', color='red')
    plt.title(f'Pronósticos AR y ARIMA para {ticker}')
    plt.legend()
    plt.show()
AAPL - ADF p-value: 0.5450, KPSS p-value: 0.0100
No description has been provided for this image
NVDA - ADF p-value: 0.9317, KPSS p-value: 0.0100
No description has been provided for this image
MSFT - ADF p-value: 0.5625, KPSS p-value: 0.0100
No description has been provided for this image
F - ADF p-value: 0.3091, KPSS p-value: 0.0100
No description has been provided for this image
AMZN - ADF p-value: 0.3980, KPSS p-value: 0.0100
No description has been provided for this image
TSLA - ADF p-value: 0.0722, KPSS p-value: 0.0100
No description has been provided for this image
SBUX - ADF p-value: 0.2028, KPSS p-value: 0.0100
No description has been provided for this image
NKE - ADF p-value: 0.5296, KPSS p-value: 0.0100
No description has been provided for this image
GOOGL - ADF p-value: 0.6184, KPSS p-value: 0.0100
No description has been provided for this image
JPM - ADF p-value: 0.8950, KPSS p-value: 0.0100
No description has been provided for this image
LMT - ADF p-value: 0.5171, KPSS p-value: 0.0100
No description has been provided for this image
HII - ADF p-value: 0.2400, KPSS p-value: 0.0100
No description has been provided for this image
KTOS - ADF p-value: 0.4420, KPSS p-value: 0.0100
No description has been provided for this image
BLK - ADF p-value: 0.4805, KPSS p-value: 0.0100
No description has been provided for this image
In [ ]:
import pandas as pd
import yfinance as yf
import numpy as np
from statsmodels.tsa.stattools import adfuller, kpss

# Definir las 14 acciones
tickers = ['AAPL', 'NVDA', 'MSFT', 'F', 'AMZN', 'TSLA', 'SBUX', 'NKE', 'GOOGL', 'JPM', 'LMT', 'HII', 'KTOS', 'BLK']

# Descargar datos desde el inicio de COVID-19 hasta hoy
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
log_returns = np.log(data / data.shift(1)).dropna()

# Pruebas para log-retornos
for ticker in tickers:
    series = log_returns[ticker]
    adf_result = adfuller(series)
    kpss_result = kpss(series)
    print(f"\n{ticker} - Log-Retornos - ADF p-value: {adf_result[1]:.4f}, KPSS p-value: {kpss_result[1]:.4f}")
[*********************100%***********************]  14 of 14 completed
AAPL - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

NVDA - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

MSFT - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

F - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

AMZN - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

TSLA - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.0424

SBUX - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

NKE - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

GOOGL - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

JPM - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

LMT - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

HII - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

KTOS - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000

BLK - Log-Retornos - ADF p-value: 0.0000, KPSS p-value: 0.1000
In [ ]:
!pip install pmdarima
Collecting pmdarima
  Downloading pmdarima-2.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl.metadata (7.8 kB)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.4.2)
Requirement already satisfied: Cython!=0.29.18,!=0.29.31,>=0.29 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (3.0.12)
Requirement already satisfied: numpy>=1.21.2 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.26.4)
Requirement already satisfied: pandas>=0.19 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (2.2.2)
Requirement already satisfied: scikit-learn>=0.22 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.6.1)
Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (1.14.1)
Requirement already satisfied: statsmodels>=0.13.2 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (0.14.4)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (2.3.0)
Requirement already satisfied: setuptools!=50.0.0,>=38.6.0 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (75.1.0)
Requirement already satisfied: packaging>=17.1 in /usr/local/lib/python3.11/dist-packages (from pmdarima) (24.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas>=0.19->pmdarima) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas>=0.19->pmdarima) (2025.1)
Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas>=0.19->pmdarima) (2025.1)
Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn>=0.22->pmdarima) (3.5.0)
Requirement already satisfied: patsy>=0.5.6 in /usr/local/lib/python3.11/dist-packages (from statsmodels>=0.13.2->pmdarima) (1.0.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas>=0.19->pmdarima) (1.17.0)
Downloading pmdarima-2.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl (2.2 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.2/2.2 MB 18.6 MB/s eta 0:00:00
Installing collected packages: pmdarima
Successfully installed pmdarima-2.0.4
In [ ]:
import pmdarima
print(pmdarima.__version__)
2.0.4
In [ ]:
import pandas as pd
import yfinance as yf
import numpy as np
from pmdarima import auto_arima
import matplotlib.pyplot as plt

# Definir las 14 acciones
tickers = ['AAPL', 'NVDA', 'MSFT', 'F', 'AMZN', 'TSLA', 'SBUX', 'NKE', 'GOOGL', 'JPM', 'LMT', 'HII', 'KTOS', 'BLK']

# Descargar datos
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
log_returns = np.log(data / data.shift(1)).dropna()

# Optimización de ARIMA para cada acción
for ticker in tickers:
    series = log_returns[ticker].dropna()
    model = auto_arima(series, start_p=0, start_q=0, max_p=5, max_q=5, d=0,
                       seasonal=False, trace=True, error_action='ignore', suppress_warnings=True)
    print(f"\n{ticker} - Mejor orden ARIMA: {model.order}")

    # Pronóstico a 30 días
    forecast = model.predict(n_periods=30)

    # Visualización
    plt.figure(figsize=(12, 6))
    plt.plot(series.tail(50), label=f'Log-Retornos Históricos ({ticker})')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:],
             forecast, label='Pronóstico ARIMA', color='red')
    plt.title(f'Pronóstico ARIMA para Log-Retornos de {ticker}')
    plt.xlabel('Fecha')
    plt.ylabel('Log-Retorno')
    plt.legend()
    plt.show()
[*********************100%***********************]  14 of 14 completed
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6630.353, Time=0.13 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6642.318, Time=0.47 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6641.816, Time=0.44 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6640.624, Time=0.47 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6640.696, Time=0.46 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6641.684, Time=4.61 sec
 ARIMA(1,0,0)(0,0,0)[0] intercept   : AIC=-6643.915, Time=0.15 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-6631.255, Time=0.21 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-6642.114, Time=0.68 sec
 ARIMA(1,0,1)(0,0,0)[0] intercept   : AIC=-6642.183, Time=0.50 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-6643.466, Time=0.27 sec
 ARIMA(2,0,1)(0,0,0)[0] intercept   : AIC=-6640.084, Time=2.10 sec

Best model:  ARIMA(1,0,0)(0,0,0)[0] intercept
Total fit time: 10.501 seconds

AAPL - Mejor orden ARIMA: (1, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-5193.291, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-5203.532, Time=0.09 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-5202.317, Time=0.22 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-5205.298, Time=0.21 sec
 ARIMA(3,0,0)(0,0,0)[0]             : AIC=-5204.412, Time=0.27 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-5201.806, Time=0.28 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-5205.124, Time=0.55 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-5202.465, Time=0.43 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-5209.760, Time=0.57 sec
 ARIMA(1,0,0)(0,0,0)[0] intercept   : AIC=-5208.703, Time=0.30 sec
 ARIMA(3,0,0)(0,0,0)[0] intercept   : AIC=-5209.304, Time=0.76 sec
 ARIMA(2,0,1)(0,0,0)[0] intercept   : AIC=-5205.482, Time=1.35 sec
 ARIMA(1,0,1)(0,0,0)[0] intercept   : AIC=-5209.959, Time=3.07 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-5207.493, Time=0.59 sec
 ARIMA(1,0,2)(0,0,0)[0] intercept   : AIC=-5208.470, Time=1.63 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-5197.170, Time=0.24 sec
 ARIMA(0,0,2)(0,0,0)[0] intercept   : AIC=-5208.815, Time=0.57 sec
 ARIMA(2,0,2)(0,0,0)[0] intercept   : AIC=-5224.513, Time=2.78 sec
 ARIMA(3,0,2)(0,0,0)[0] intercept   : AIC=-5206.152, Time=3.19 sec
 ARIMA(2,0,3)(0,0,0)[0] intercept   : AIC=-5206.172, Time=6.21 sec
 ARIMA(1,0,3)(0,0,0)[0] intercept   : AIC=-5208.071, Time=2.64 sec
 ARIMA(3,0,1)(0,0,0)[0] intercept   : AIC=-5207.395, Time=0.77 sec
 ARIMA(3,0,3)(0,0,0)[0] intercept   : AIC=-5204.248, Time=2.58 sec
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-5220.046, Time=1.10 sec

Best model:  ARIMA(2,0,2)(0,0,0)[0] intercept
Total fit time: 30.477 seconds

NVDA - Mejor orden ARIMA: (2, 0, 2)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6736.799, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6775.408, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6774.640, Time=0.19 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6773.425, Time=0.09 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6773.427, Time=0.54 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6771.436, Time=2.09 sec
 ARIMA(1,0,0)(0,0,0)[0] intercept   : AIC=-6776.200, Time=0.21 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-6736.754, Time=0.38 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-6774.245, Time=0.44 sec
 ARIMA(1,0,1)(0,0,0)[0] intercept   : AIC=-6774.253, Time=1.21 sec
 ARIMA(0,0,1)(0,0,0)[0] intercept   : AIC=-6775.605, Time=0.57 sec
 ARIMA(2,0,1)(0,0,0)[0] intercept   : AIC=-6772.243, Time=1.31 sec

Best model:  ARIMA(1,0,0)(0,0,0)[0] intercept
Total fit time: 7.182 seconds

MSFT - Mejor orden ARIMA: (1, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-5747.087, Time=0.07 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-5746.311, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-5746.285, Time=0.18 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-5744.319, Time=0.25 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-5745.189, Time=0.25 sec

Best model:  ARIMA(0,0,0)(0,0,0)[0]          
Total fit time: 0.832 seconds

F - Mejor orden ARIMA: (0, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6309.314, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6308.682, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6308.642, Time=0.18 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6307.413, Time=0.11 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-6308.235, Time=0.25 sec

Best model:  ARIMA(0,0,0)(0,0,0)[0]          
Total fit time: 0.694 seconds

AMZN - Mejor orden ARIMA: (0, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-4634.457, Time=0.07 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-4632.521, Time=0.15 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-4632.508, Time=0.10 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-4634.671, Time=0.54 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-4634.509, Time=0.48 sec
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-4633.650, Time=0.26 sec
 ARIMA(0,0,2)(0,0,0)[0]             : AIC=-4633.395, Time=0.10 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-4633.461, Time=0.22 sec
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-4638.474, Time=1.12 sec
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=-4634.019, Time=1.29 sec
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-4635.573, Time=3.55 sec
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-4632.150, Time=0.55 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-4631.744, Time=0.24 sec
 ARIMA(3,0,3)(0,0,0)[0]             : AIC=-4634.414, Time=1.61 sec
 ARIMA(2,0,2)(0,0,0)[0] intercept   : AIC=-4635.192, Time=1.70 sec

Best model:  ARIMA(2,0,2)(0,0,0)[0]          
Total fit time: 12.001 seconds

TSLA - Mejor orden ARIMA: (2, 0, 2)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6469.817, Time=0.07 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6484.467, Time=0.12 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6482.907, Time=0.23 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6485.144, Time=0.19 sec
 ARIMA(3,0,0)(0,0,0)[0]             : AIC=-6483.160, Time=0.49 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6483.131, Time=0.36 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6484.396, Time=0.59 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-6481.141, Time=0.48 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-6483.281, Time=0.63 sec

Best model:  ARIMA(2,0,0)(0,0,0)[0]          
Total fit time: 3.164 seconds

SBUX - Mejor orden ARIMA: (2, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6367.793, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6366.011, Time=0.10 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6366.003, Time=0.10 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6364.007, Time=0.34 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-6365.838, Time=0.28 sec

Best model:  ARIMA(0,0,0)(0,0,0)[0]          
Total fit time: 0.901 seconds

NKE - Mejor orden ARIMA: (0, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6557.287, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6565.211, Time=0.08 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6564.898, Time=0.13 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6563.536, Time=0.27 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6563.802, Time=0.31 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6561.478, Time=2.23 sec
 ARIMA(1,0,0)(0,0,0)[0] intercept   : AIC=-6565.084, Time=0.22 sec

Best model:  ARIMA(1,0,0)(0,0,0)[0]          
Total fit time: 3.330 seconds

GOOGL - Mejor orden ARIMA: (1, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6583.699, Time=0.12 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6602.898, Time=0.18 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6599.139, Time=0.26 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6613.971, Time=0.23 sec
 ARIMA(3,0,0)(0,0,0)[0]             : AIC=-6611.971, Time=0.31 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6611.971, Time=0.42 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6603.719, Time=0.28 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-6610.006, Time=0.63 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-6612.927, Time=0.24 sec

Best model:  ARIMA(2,0,0)(0,0,0)[0]          
Total fit time: 2.679 seconds

JPM - Mejor orden ARIMA: (2, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-7109.577, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-7108.315, Time=0.10 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-7108.304, Time=0.20 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-7106.309, Time=0.35 sec
 ARIMA(0,0,0)(0,0,0)[0] intercept   : AIC=-7107.864, Time=0.24 sec

Best model:  ARIMA(0,0,0)(0,0,0)[0]          
Total fit time: 0.970 seconds

LMT - Mejor orden ARIMA: (0, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6436.899, Time=0.10 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6437.547, Time=0.09 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6437.517, Time=0.17 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6435.591, Time=0.24 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6435.617, Time=0.21 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6433.584, Time=0.40 sec
 ARIMA(1,0,0)(0,0,0)[0] intercept   : AIC=-6435.579, Time=0.32 sec

Best model:  ARIMA(1,0,0)(0,0,0)[0]          
Total fit time: 1.534 seconds

HII - Mejor orden ARIMA: (1, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-5422.159, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-5424.112, Time=0.17 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-5423.669, Time=0.22 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-5427.409, Time=0.29 sec
 ARIMA(3,0,0)(0,0,0)[0]             : AIC=-5426.576, Time=0.30 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-5424.460, Time=0.24 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-5425.644, Time=0.57 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-5424.660, Time=0.36 sec
 ARIMA(2,0,0)(0,0,0)[0] intercept   : AIC=-5425.582, Time=0.61 sec

Best model:  ARIMA(2,0,0)(0,0,0)[0]          
Total fit time: 2.851 seconds

KTOS - Mejor orden ARIMA: (2, 0, 0)
No description has been provided for this image
Performing stepwise search to minimize aic
 ARIMA(0,0,0)(0,0,0)[0]             : AIC=-6656.280, Time=0.08 sec
 ARIMA(1,0,0)(0,0,0)[0]             : AIC=-6659.778, Time=0.09 sec
 ARIMA(0,0,1)(0,0,0)[0]             : AIC=-6659.427, Time=0.40 sec
 ARIMA(2,0,0)(0,0,0)[0]             : AIC=-6659.253, Time=0.40 sec
 ARIMA(1,0,1)(0,0,0)[0]             : AIC=-6658.914, Time=2.16 sec
 ARIMA(2,0,1)(0,0,0)[0]             : AIC=-6669.046, Time=1.48 sec
 ARIMA(3,0,1)(0,0,0)[0]             : AIC=-6655.882, Time=0.44 sec
 ARIMA(2,0,2)(0,0,0)[0]             : AIC=-6701.639, Time=1.16 sec
 ARIMA(1,0,2)(0,0,0)[0]             : AIC=-6666.868, Time=0.75 sec
 ARIMA(3,0,2)(0,0,0)[0]             : AIC=-6655.833, Time=1.37 sec
 ARIMA(2,0,3)(0,0,0)[0]             : AIC=-6655.107, Time=1.48 sec
 ARIMA(1,0,3)(0,0,0)[0]             : AIC=-6656.633, Time=0.79 sec
 ARIMA(3,0,3)(0,0,0)[0]             : AIC=-6653.673, Time=1.51 sec
 ARIMA(2,0,2)(0,0,0)[0] intercept   : AIC=-6664.117, Time=2.07 sec

Best model:  ARIMA(2,0,2)(0,0,0)[0]          
Total fit time: 14.190 seconds

BLK - Mejor orden ARIMA: (2, 0, 2)
No description has been provided for this image
In [ ]:
import pandas as pd
import yfinance as yf
from statsmodels.tsa.stattools import coint

# Descargar datos
tickers = ['AAPL', 'NVDA', 'MSFT', 'F', 'AMZN', 'TSLA', 'SBUX', 'NKE', 'GOOGL', 'JPM', 'LMT', 'HII', 'KTOS', 'BLK']
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']

# Pruebas de cointegración con AAPL como referencia
for ticker in tickers[1:]:  # Excluir AAPL
    result = coint(data['AAPL'], data[ticker])
    print(f"\nCointegración AAPL vs {ticker}: p-value = {result[1]:.4f}")
[*********************100%***********************]  14 of 14 completed
Cointegración AAPL vs NVDA: p-value = 0.1815

Cointegración AAPL vs MSFT: p-value = 0.3646

Cointegración AAPL vs F: p-value = 0.7952

Cointegración AAPL vs AMZN: p-value = 0.7734

Cointegración AAPL vs TSLA: p-value = 0.8174

Cointegración AAPL vs SBUX: p-value = 0.7472

Cointegración AAPL vs NKE: p-value = 0.5437

Cointegración AAPL vs GOOGL: p-value = 0.2258

Cointegración AAPL vs JPM: p-value = 0.1045

Cointegración AAPL vs LMT: p-value = 0.1389

Cointegración AAPL vs HII: p-value = 0.5042

Cointegración AAPL vs KTOS: p-value = 0.6507

Cointegración AAPL vs BLK: p-value = 0.4392
In [ ]:
import pandas as pd
import yfinance as yf
import numpy as np
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
import matplotlib.pyplot as plt

# Descargar datos
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
log_returns = np.log(data / data.shift(1)).dropna()

# Modelos AR y ARMA para las primeras 2 acciones (puedes expandir)
for ticker in tickers[:2]:
    series = log_returns[ticker].dropna()

    # AR(1)
    ar_model = AutoReg(series, lags=1)
    ar_fit = ar_model.fit()
    ar_forecast = ar_fit.forecast(steps=30)

    # ARMA(1,1)
    arma_model = ARIMA(series, order=(1, 0, 1))
    arma_fit = arma_model.fit()
    arma_forecast = arma_fit.forecast(steps=30)

    # Visualización
    plt.figure(figsize=(12, 6))
    plt.plot(series.tail(50), label=f'Log-Retornos Históricos ({ticker})')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:],
             ar_forecast, label='AR(1) Pronóstico', color='blue')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:],
             arma_forecast, label='ARMA(1,1) Pronóstico', color='green')
    plt.title(f'Pronósticos AR y ARMA para Log-Retornos de {ticker}')
    plt.xlabel('Fecha')
    plt.ylabel('Log-Retorno')
    plt.legend()
    plt.show()
[*********************100%***********************]  14 of 14 completed
No description has been provided for this image
No description has been provided for this image
In [ ]:
import pandas as pd
import yfinance as yf
import numpy as np
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
import matplotlib.pyplot as plt

# Definir las acciones seleccionadas
tickers = ['NVDA', 'TSLA', 'AMZN']

# Descargar datos
data = yf.download(tickers, start='2019-11-29', end='2025-03-12', interval='1d')['Close']
log_returns = np.log(data / data.shift(1)).dropna()

# Modelos AR y ARMA
for ticker in tickers:
    series = log_returns[ticker].dropna()

    # AR(1)
    ar_model = AutoReg(series, lags=1)
    ar_fit = ar_model.fit()
    ar_forecast = ar_fit.forecast(steps=30)

    # ARMA(1,1)
    arma_model = ARIMA(series, order=(1, 0, 1))
    arma_fit = arma_model.fit()
    arma_forecast = arma_fit.forecast(steps=30)

    # Visualización
    plt.figure(figsize=(12, 6))
    plt.plot(series.tail(50), label=f'Log-Retornos Históricos ({ticker})')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:],
             ar_forecast, label='AR(1) Pronóstico', color='blue')
    plt.plot(pd.date_range(start=series.index[-1], periods=31, freq='B')[1:],
             arma_forecast, label='ARMA(1,1) Pronóstico', color='green')
    plt.title(f'Pronósticos AR y ARMA para Log-Retornos de {ticker}')
    plt.xlabel('Fecha')
    plt.ylabel('Log-Retorno')
    plt.legend()
    plt.show()
[*********************100%***********************]  3 of 3 completed
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
import pandas as pd
import yfinance as yf
import numpy as np
import matplotlib.pyplot as plt

tickers = ['NVDA', 'TSLA', 'AMZN']
for ticker in tickers:
    data_full = yf.download(ticker, start='2019-11-29', end='2025-03-12', interval='1d')
    data_full['Log_Returns'] = np.log(data_full['Close'] / data_full['Close'].shift(1))
    exp1 = data_full['Close'].ewm(span=12).mean()
    exp2 = data_full['Close'].ewm(span=26).mean()
    data_full['MACD'] = exp1 - exp2
    data_full['Signal'] = data_full['MACD'].ewm(span=9).mean()
    delta = data_full['Close'].diff()
    gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
    rs = gain / loss
    data_full['RSI'] = 100 - (100 / (1 + rs))

    signals = pd.DataFrame(index=data_full.index)
    signals['Signal'] = 0
    signals.loc[(data_full['MACD'] > data_full['Signal']) & (data_full['RSI'] < 30), 'Signal'] = 1
    signals.loc[(data_full['MACD'] < data_full['Signal']) & (data_full['RSI'] > 70), 'Signal'] = -1
    signals['Returns'] = data_full['Log_Returns']
    signals['Strategy'] = signals['Signal'].shift(1) * signals['Returns']

    plt.figure(figsize=(12, 6))
    plt.plot(signals['Strategy'].cumsum(), label='Retorno Estratégico')
    plt.plot(signals['Returns'].cumsum(), label='Retorno Buy & Hold')
    plt.title(f'Backtesting para {ticker}')
    plt.legend()
    plt.show()
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
[*********************100%***********************]  1 of 1 completed
No description has been provided for this image
In [ ]:
!pip install nbconvert
Requirement already satisfied: nbconvert in /usr/local/lib/python3.11/dist-packages (7.16.6)
Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (4.13.3)
Requirement already satisfied: bleach!=5.0.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (6.2.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.7.1)
Requirement already satisfied: jinja2>=3.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.6)
Requirement already satisfied: jupyter-core>=4.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.2)
Requirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.3.0)
Requirement already satisfied: markupsafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.0.2)
Requirement already satisfied: mistune<4,>=2.0.3 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (3.1.2)
Requirement already satisfied: nbclient>=0.5.0 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (0.10.2)
Requirement already satisfied: nbformat>=5.7 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.10.4)
Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from nbconvert) (24.2)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (1.5.1)
Requirement already satisfied: pygments>=2.4.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (2.18.0)
Requirement already satisfied: traitlets>=5.1 in /usr/local/lib/python3.11/dist-packages (from nbconvert) (5.7.1)
Requirement already satisfied: webencodings in /usr/local/lib/python3.11/dist-packages (from bleach!=5.0.0->bleach[css]!=5.0.0->nbconvert) (0.5.1)
Requirement already satisfied: tinycss2<1.5,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from bleach[css]!=5.0.0->nbconvert) (1.4.0)
Requirement already satisfied: platformdirs>=2.5 in /usr/local/lib/python3.11/dist-packages (from jupyter-core>=4.7->nbconvert) (4.3.6)
Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.11/dist-packages (from nbclient>=0.5.0->nbconvert) (6.1.12)
Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (2.21.1)
Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.11/dist-packages (from nbformat>=5.7->nbconvert) (4.23.0)
Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (2.6)
Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->nbconvert) (4.12.2)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (25.1.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (2024.10.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.36.2)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=2.6->nbformat>=5.7->nbconvert) (0.23.1)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (24.0.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (2.8.2)
Requirement already satisfied: tornado>=4.1 in /usr/local/lib/python3.11/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (6.4.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.1->jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (1.17.0)
In [ ]:
from google.colab import drive
import os

# 1. Montar Google Drive
drive.mount('/content/drive')

# 2. Definir la ruta del archivo
input_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/Evidencia.ipynb"
output_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/Evidencia.html"

# 3. Convertir a HTML usando nbconvert
!jupyter nbconvert --to html "$input_path" --output "$output_path"

print(f"Archivo convertido y guardado en: {output_path}")
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
[NbConvertApp] Converting notebook /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/Evidencia.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 40 image(s).
[NbConvertApp] Writing 3522774 bytes to /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/Evidencia.html
Archivo convertido y guardado en: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/Evidencia.html
In [ ]:
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller, acf, pacf
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import warnings
warnings.filterwarnings("ignore")

# Step 1: Data Preparation
# Corrected PDI data from the updated table
data = {
    'YEAR': [f"{year}.{quarter}" for year in range(1970, 1992) for quarter in range(1, 5)],
    'PDI': [
        1990.6, 2020.1, 2045.3, 2045.2, 2073.9, 2098.0, 2106.6, 2121.1, 2129.7, 2149.1, 2193.9, 2272.0,
        2300.7, 2315.2, 2337.9, 2382.7, 2334.7, 2304.5, 2315.0, 2313.7, 2282.5, 2390.3, 2354.4, 2389.4,
        2424.5, 2434.9, 2444.7, 2459.5, 2463.0, 2490.3, 2541.0, 2556.2, 2587.3, 2631.9, 2653.2, 2680.9,
        2699.2, 2697.6, 2715.3, 2728.1, 2742.9, 2692.0, 2722.5, 2777.0, 2783.7, 2776.7, 2814.1, 2808.8,
        2795.0, 2824.8, 2829.0, 2832.6, 2843.6, 2867.0, 2903.0, 2960.6, 3033.2, 3065.9, 3102.7, 3118.5,
        3123.6, 3189.6, 3156.5, 3178.7, 3227.5, 3281.4, 3272.6, 3266.2, 3295.2, 3241.7, 3285.7, 3335.8,
        3380.1, 3386.3, 3407.5, 3443.1, 3473.9, 3450.9, 3466.9, 3493.0, 3531.4, 3545.3, 3547.0, 3529.5,
        3514.8, 3537.4, 3539.9, 3547.5
    ]
}

df = pd.DataFrame(data)
df['YEAR'] = pd.to_datetime(df['YEAR'].str.replace('.', '-'), format='%Y-%m')
df.set_index('YEAR', inplace=True)
df['log_PDI'] = np.log(df['PDI'])

# Step 2: Exploratory Analysis
plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI'], label='Log PDI')
plt.title('Log PDI Time Series')
plt.xlabel('Year')
plt.ylabel('Log PDI')
plt.legend()
plt.show()

# ADF Test for stationarity
def adf_test(series, title=''):
    result = adfuller(series.dropna())
    print(f'ADF Test on {title}')
    print(f'ADF Statistic: {result[0]}')
    print(f'p-value: {result[1]}')
    print(f'Critical Values: {result[4]}')
    print('Stationary' if result[1] < 0.05 else 'Non-Stationary')

adf_test(df['log_PDI'], 'Log PDI')

# Step 3: Stationarity Adjustment (Differencing)
df['log_PDI_diff'] = df['log_PDI'].diff().dropna()
plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI_diff'], label='First Difference of Log PDI')
plt.title('First Difference of Log PDI')
plt.xlabel('Year')
plt.ylabel('First Difference')
plt.legend()
plt.show()

adf_test(df['log_PDI_diff'], 'First Difference of Log PDI')

# Step 4: Model Identification (ACF and PACF)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plot_acf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('ACF of Differenced Log PDI')
plt.subplot(122)
plot_pacf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('PACF of Differenced Log PDI')
plt.show()

# Step 5: Model Estimation
# Trying ARIMA(0,1,1) based on previous feedback (simpler model)
model = ARIMA(df['log_PDI'], order=(0, 1, 1))
results = model.fit()
print(results.summary())

# Step 6: Model Diagnostics
residuals = results.resid
plt.figure(figsize=(10, 6))
plt.plot(residuals, label='Residuals')
plt.title('Residuals of ARIMA(0,1,1)')
plt.xlabel('Year')
plt.ylabel('Residuals')
plt.legend()
plt.show()

plot_acf(residuals, lags=20)
plt.title('ACF of Residuals')
plt.show()

# Step 7: Model Selection (AIC/BIC already in summary)
# Optionally fit other models (e.g., ARIMA(1,1,1), ARIMA(1,1,0)) and compare AIC/BIC

# Step 8: Forecasting
forecast = results.forecast(steps=8)
plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI'], label='Observed')
plt.plot(forecast, label='Forecast', color='red')
plt.title('Log PDI Forecast')
plt.xlabel('Year')
plt.ylabel('Log PDI')
plt.legend()
plt.show()
No description has been provided for this image
ADF Test on Log PDI
ADF Statistic: -1.4407471034265025
p-value: 0.5625931773883976
Critical Values: {'1%': -3.512738056978279, '5%': -2.8974898650628984, '10%': -2.585948732897085}
Non-Stationary
No description has been provided for this image
ADF Test on First Difference of Log PDI
ADF Statistic: -4.66915965544451
p-value: 9.612498486181354e-05
Critical Values: {'1%': -3.512738056978279, '5%': -2.8974898650628984, '10%': -2.585948732897085}
Stationary
No description has been provided for this image
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                log_PDI   No. Observations:                   88
Model:                 ARIMA(0, 1, 1)   Log Likelihood                 260.182
Date:                Fri, 14 Mar 2025   AIC                           -516.364
Time:                        18:00:16   BIC                           -511.433
Sample:                             0   HQIC                          -514.379
                                 - 88                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
ma.L1          0.1860      0.079      2.347      0.019       0.031       0.341
sigma2         0.0001   1.71e-05      8.650      0.000       0.000       0.000
===================================================================================
Ljung-Box (L1) (Q):                   4.64   Jarque-Bera (JB):                28.85
Prob(Q):                              0.03   Prob(JB):                         0.00
Heteroskedasticity (H):               0.37   Skew:                             0.13
Prob(H) (two-sided):                  0.01   Kurtosis:                         5.81
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
print(forecast)
88    8.174418
89    8.174418
90    8.174418
91    8.174418
92    8.174418
93    8.174418
94    8.174418
95    8.174418
Name: predicted_mean, dtype: float64
In [ ]:
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import warnings
warnings.filterwarnings("ignore")

# Step 1: Data Preparation
data = {
    'YEAR': [f"{year}.{quarter}" for year in range(1970, 1992) for quarter in range(1, 5)],
    'PDI': [
        1990.6, 2020.1, 2045.3, 2045.2, 2073.9, 2098.0, 2106.6, 2121.1, 2129.7, 2149.1, 2193.9, 2272.0,
        2300.7, 2315.2, 2337.9, 2382.7, 2334.7, 2304.5, 2315.0, 2313.7, 2282.5, 2390.3, 2354.4, 2389.4,
        2424.5, 2434.9, 2444.7, 2459.5, 2463.0, 2490.3, 2541.0, 2556.2, 2587.3, 2631.9, 2653.2, 2680.9,
        2699.2, 2697.6, 2715.3, 2728.1, 2742.9, 2692.0, 2722.5, 2777.0, 2783.7, 2776.7, 2814.1, 2808.8,
        2795.0, 2824.8, 2829.0, 2832.6, 2843.6, 2867.0, 2903.0, 2960.6, 3033.2, 3065.9, 3102.7, 3118.5,
        3123.6, 3189.6, 3156.5, 3178.7, 3227.5, 3281.4, 3272.6, 3266.2, 3295.2, 3241.7, 3285.7, 3335.8,
        3380.1, 3386.3, 3407.5, 3443.1, 3473.9, 3450.9, 3466.9, 3493.0, 3531.4, 3545.3, 3547.0, 3529.5,
        3514.8, 3537.4, 3539.9, 3547.5
    ]
}

df = pd.DataFrame(data)
df['YEAR'] = pd.to_datetime(df['YEAR'].str.replace('.', '-'), format='%Y-%m')
df.set_index('YEAR', inplace=True)
df['log_PDI'] = np.log(df['PDI'])

# Step 2: Exploratory Analysis and Stationarity
df['log_PDI_diff'] = df['log_PDI'].diff().dropna()
adf_test = adfuller(df['log_PDI_diff'].dropna())
print('ADF Test on First Difference of Log PDI')
print(f'ADF Statistic: {adf_test[0]}')
print(f'p-value: {adf_test[1]}')
print(f'Critical Values: {adf_test[4]}')
print('Stationary' if adf_test[1] < 0.05 else 'Non-Stationary')

# Step 3: Model Identification (ACF and PACF)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plot_acf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('ACF of Differenced Log PDI')
plt.subplot(122)
plot_pacf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('PACF of Differenced Log PDI')
plt.show()

# Step 4: Model Estimation with ARIMA(1,1,0)
model = ARIMA(df['log_PDI'], order=(1, 1, 0))  # AR(1) on differenced series
results = model.fit()
print(results.summary())

# Step 5: Model Diagnostics
residuals = results.resid
plt.figure(figsize=(10, 6))
plt.plot(residuals, label='Residuals')
plt.title('Residuals of ARIMA(1,1,0)')
plt.xlabel('Year')
plt.ylabel('Residuals')
plt.legend()
plt.show()

plot_acf(residuals, lags=20)
plt.title('ACF of Residuals')
plt.show()

# Step 6: Forecasting
forecast = results.forecast(steps=8)
forecast_index = pd.date_range(start=df.index[-1], periods=9, freq='QS')[1:]
forecast_series = pd.Series(forecast, index=forecast_index)

plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI'], label='Observed')
plt.plot(forecast_series, label='Forecast', color='red')
plt.title('Log PDI Forecast with ARIMA(1,1,0)')
plt.xlabel('Year')
plt.ylabel('Log PDI')
plt.legend()
plt.show()

print('Forecast Values:')
print(forecast_series)
ADF Test on First Difference of Log PDI
ADF Statistic: -4.66915965544451
p-value: 9.612498486181354e-05
Critical Values: {'1%': -3.512738056978279, '5%': -2.8974898650628984, '10%': -2.585948732897085}
Stationary
No description has been provided for this image
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                log_PDI   No. Observations:                   88
Model:                 ARIMA(1, 1, 0)   Log Likelihood                 260.916
Date:                Fri, 14 Mar 2025   AIC                           -517.832
Time:                        18:05:37   BIC                           -512.900
Sample:                             0   HQIC                          -515.846
                                 - 88                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
ar.L1          0.2430      0.083      2.924      0.003       0.080       0.406
sigma2         0.0001   1.66e-05      8.766      0.000       0.000       0.000
===================================================================================
Ljung-Box (L1) (Q):                   6.59   Jarque-Bera (JB):                33.06
Prob(Q):                              0.01   Prob(JB):                         0.00
Heteroskedasticity (H):               0.38   Skew:                             0.13
Prob(H) (two-sided):                  0.01   Kurtosis:                         6.01
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Forecast Values:
1991-07-01   NaN
1991-10-01   NaN
1992-01-01   NaN
1992-04-01   NaN
1992-07-01   NaN
1992-10-01   NaN
1993-01-01   NaN
1993-04-01   NaN
Freq: QS-JAN, Name: predicted_mean, dtype: float64
In [ ]:
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import warnings
warnings.filterwarnings("ignore")

# Step 1: Data Preparation
data = {
    'YEAR': [f"{year}.{quarter}" for year in range(1970, 1992) for quarter in range(1, 5)],
    'PDI': [
        1990.6, 2020.1, 2045.3, 2045.2, 2073.9, 2098.0, 2106.6, 2121.1, 2129.7, 2149.1, 2193.9, 2272.0,
        2300.7, 2315.2, 2337.9, 2382.7, 2334.7, 2304.5, 2315.0, 2313.7, 2282.5, 2390.3, 2354.4, 2389.4,
        2424.5, 2434.9, 2444.7, 2459.5, 2463.0, 2490.3, 2541.0, 2556.2, 2587.3, 2631.9, 2653.2, 2680.9,
        2699.2, 2697.6, 2715.3, 2728.1, 2742.9, 2692.0, 2722.5, 2777.0, 2783.7, 2776.7, 2814.1, 2808.8,
        2795.0, 2824.8, 2829.0, 2832.6, 2843.6, 2867.0, 2903.0, 2960.6, 3033.2, 3065.9, 3102.7, 3118.5,
        3123.6, 3189.6, 3156.5, 3178.7, 3227.5, 3281.4, 3272.6, 3266.2, 3295.2, 3241.7, 3285.7, 3335.8,
        3380.1, 3386.3, 3407.5, 3443.1, 3473.9, 3450.9, 3466.9, 3493.0, 3531.4, 3545.3, 3547.0, 3529.5,
        3514.8, 3537.4, 3539.9, 3547.5
    ]
}

df = pd.DataFrame(data)
df['YEAR'] = pd.to_datetime(df['YEAR'].str.replace('.', '-'), format='%Y-%m')
df.set_index('YEAR', inplace=True)
df['log_PDI'] = np.log(df['PDI'])

# Step 2: Exploratory Analysis and Stationarity
df['log_PDI_diff'] = df['log_PDI'].diff().dropna()
adf_test = adfuller(df['log_PDI_diff'].dropna())
print('ADF Test on First Difference of Log PDI')
print(f'ADF Statistic: {adf_test[0]}')
print(f'p-value: {adf_test[1]}')
print(f'Critical Values: {adf_test[4]}')
print('Stationary' if adf_test[1] < 0.05 else 'Non-Stationary')

# Step 3: Model Identification (ACF and PACF)
plt.figure(figsize=(12, 6))
plt.subplot(121)
plot_acf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('ACF of Differenced Log PDI')
plt.subplot(122)
plot_pacf(df['log_PDI_diff'].dropna(), ax=plt.gca(), lags=20)
plt.title('PACF of Differenced Log PDI')
plt.show()

# Step 4: Model Estimation with ARIMA(1,1,0)
model = ARIMA(df['log_PDI'], order=(1, 1, 0))
results = model.fit()
print(results.summary())

# Step 5: Model Diagnostics
residuals = results.resid
plt.figure(figsize=(10, 6))
plt.plot(residuals, label='Residuals')
plt.title('Residuals of ARIMA(1,1,0)')
plt.xlabel('Year')
plt.ylabel('Residuals')
plt.legend()
plt.show()

plot_acf(residuals, lags=20)
plt.title('ACF of Residuals')
plt.show()

# Step 6: Forecasting
# Generate forecast
forecast_obj = results.get_forecast(steps=8)
forecast = forecast_obj.predicted_mean
forecast_ci = forecast_obj.conf_int()

# Create index for forecast period (1991.4 to 1993.3)
forecast_index = pd.date_range(start=df.index[-1] + pd.offsets.QuarterBegin(1), periods=8, freq='QS')

# Create forecast Series with proper index
forecast_series = pd.Series(forecast.values, index=forecast_index)

# Plot observed data and forecast
plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI'], label='Observed')
plt.plot(forecast_series, label='Forecast', color='red')
plt.fill_between(forecast_series.index,
                 forecast_ci.iloc[:, 0],
                 forecast_ci.iloc[:, 1], color='pink', alpha=0.3, label='95% CI')
plt.title('Log PDI Forecast with ARIMA(1,1,0)')
plt.xlabel('Year')
plt.ylabel('Log PDI')
plt.legend()
plt.show()

print('Forecast Values:')
print(forecast_series)
ADF Test on First Difference of Log PDI
ADF Statistic: -4.66915965544451
p-value: 9.612498486181354e-05
Critical Values: {'1%': -3.512738056978279, '5%': -2.8974898650628984, '10%': -2.585948732897085}
Stationary
No description has been provided for this image
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                log_PDI   No. Observations:                   88
Model:                 ARIMA(1, 1, 0)   Log Likelihood                 260.916
Date:                Fri, 14 Mar 2025   AIC                           -517.832
Time:                        18:08:05   BIC                           -512.900
Sample:                             0   HQIC                          -515.846
                                 - 88                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
ar.L1          0.2430      0.083      2.924      0.003       0.080       0.406
sigma2         0.0001   1.66e-05      8.766      0.000       0.000       0.000
===================================================================================
Ljung-Box (L1) (Q):                   6.59   Jarque-Bera (JB):                33.06
Prob(Q):                              0.01   Prob(JB):                         0.00
Heteroskedasticity (H):               0.38   Skew:                             0.13
Prob(H) (two-sided):                  0.01   Kurtosis:                         6.01
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Forecast Values:
1991-07-01    8.174520
1991-10-01    8.174646
1992-01-01    8.174677
1992-04-01    8.174685
1992-07-01    8.174686
1992-10-01    8.174687
1993-01-01    8.174687
1993-04-01    8.174687
Freq: QS-JAN, dtype: float64
In [ ]:
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import warnings
warnings.filterwarnings("ignore")

# Step 1: Data Preparation
data = {
    'YEAR': [f"{year}.{quarter}" for year in range(1970, 1992) for quarter in range(1, 5)],
    'PDI': [
        1990.6, 2020.1, 2045.3, 2045.2, 2073.9, 2098.0, 2106.6, 2121.1, 2129.7, 2149.1, 2193.9, 2272.0,
        2300.7, 2315.2, 2337.9, 2382.7, 2334.7, 2304.5, 2315.0, 2313.7, 2282.5, 2390.3, 2354.4, 2389.4,
        2424.5, 2434.9, 2444.7, 2459.5, 2463.0, 2490.3, 2541.0, 2556.2, 2587.3, 2631.9, 2653.2, 2680.9,
        2699.2, 2697.6, 2715.3, 2728.1, 2742.9, 2692.0, 2722.5, 2777.0, 2783.7, 2776.7, 2814.1, 2808.8,
        2795.0, 2824.8, 2829.0, 2832.6, 2843.6, 2867.0, 2903.0, 2960.6, 3033.2, 3065.9, 3102.7, 3118.5,
        3123.6, 3189.6, 3156.5, 3178.7, 3227.5, 3281.4, 3272.6, 3266.2, 3295.2, 3241.7, 3285.7, 3335.8,
        3380.1, 3386.3, 3407.5, 3443.1, 3473.9, 3450.9, 3466.9, 3493.0, 3531.4, 3545.3, 3547.0, 3529.5,
        3514.8, 3537.4, 3539.9, 3547.5
    ]
}

df = pd.DataFrame(data)
df['YEAR'] = pd.to_datetime(df['YEAR'].str.replace('.', '-'), format='%Y-%m')
df.set_index('YEAR', inplace=True)
df['log_PDI'] = np.log(df['PDI'])

# Step 2: Exploratory Analysis and Stationarity
df['log_PDI_diff'] = df['log_PDI'].diff().dropna()
adf_test = adfuller(df['log_PDI_diff'].dropna())
print('ADF Test on First Difference of Log PDI')
print(f'ADF Statistic: {adf_test[0]}')
print(f'p-value: {adf_test[1]}')
print(f'Critical Values: {adf_test[4]}')
print('Stationary' if adf_test[1] < 0.05 else 'Non-Stationary')

# Step 3: Model Estimation with ARIMA(1,1,1)
model = ARIMA(df['log_PDI'], order=(1, 1, 1), enforce_stationarity=True)
results = model.fit()
print(results.summary())

# Step 4: Model Diagnostics
residuals = results.resid
plt.figure(figsize=(10, 6))
plt.plot(residuals, label='Residuals')
plt.title('Residuals of ARIMA(1,1,1)')
plt.xlabel('Year')
plt.ylabel('Residuals')
plt.legend()
plt.show()

plot_acf(residuals, lags=20)
plt.title('ACF of Residuals')
plt.show()

# Step 5: Forecasting
forecast_obj = results.get_forecast(steps=8)
forecast = forecast_obj.predicted_mean
forecast_ci = forecast_obj.conf_int()

forecast_index = pd.date_range(start=df.index[-1] + pd.offsets.QuarterBegin(1), periods=8, freq='QS')
forecast_series = pd.Series(forecast.values, index=forecast_index)

plt.figure(figsize=(10, 6))
plt.plot(df['log_PDI'], label='Observed')
plt.plot(forecast_series, label='Forecast', color='red')
plt.fill_between(forecast_series.index,
                 forecast_ci.iloc[:, 0],
                 forecast_ci.iloc[:, 1], color='pink', alpha=0.3, label='95% CI')
plt.title('Log PDI Forecast with ARIMA(1,1,1)')
plt.xlabel('Year')
plt.ylabel('Log PDI')
plt.legend()
plt.show()

print('Forecast Values:')
print(forecast_series)
ADF Test on First Difference of Log PDI
ADF Statistic: -4.66915965544451
p-value: 9.612498486181354e-05
Critical Values: {'1%': -3.512738056978279, '5%': -2.8974898650628984, '10%': -2.585948732897085}
Stationary
                               SARIMAX Results                                
==============================================================================
Dep. Variable:                log_PDI   No. Observations:                   88
Model:                 ARIMA(1, 1, 1)   Log Likelihood                 270.603
Date:                Fri, 14 Mar 2025   AIC                           -535.206
Time:                        18:11:18   BIC                           -527.808
Sample:                             0   HQIC                          -532.227
                                 - 88                                         
Covariance Type:                  opg                                         
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
ar.L1          0.9998      0.004    239.383      0.000       0.992       1.008
ma.L1         -0.9886      0.146     -6.768      0.000      -1.275      -0.702
sigma2         0.0001   1.94e-05      5.811      0.000    7.47e-05       0.000
===================================================================================
Ljung-Box (L1) (Q):                   0.23   Jarque-Bera (JB):                16.88
Prob(Q):                              0.63   Prob(JB):                         0.00
Heteroskedasticity (H):               0.40   Skew:                             0.13
Prob(H) (two-sided):                  0.02   Kurtosis:                         5.14
===================================================================================

Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Forecast Values:
1991-07-01    8.180231
1991-10-01    8.186463
1992-01-01    8.192694
1992-04-01    8.198924
1992-07-01    8.205152
1992-10-01    8.211380
1993-01-01    8.217607
1993-04-01    8.223832
Freq: QS-JAN, dtype: float64
In [ ]:
from google.colab import drive
import os

# 1. Montar Google Drive
drive.mount('/content/drive')

# 2. Definir la ruta del archivo
input_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 1.ipynb"
output_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 1.html"

# 3. Convertir a HTML usando nbconvert
!jupyter nbconvert --to html "$input_path" --output "$output_path"

print(f"Archivo convertido y guardado en: {output_path}")
Mounted at /content/drive
[NbConvertApp] Converting notebook /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 1.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 17 image(s).
[NbConvertApp] Writing 961259 bytes to /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 1.html
Archivo convertido y guardado en: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 1.html
In [ ]:
# Import necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, coint
from statsmodels.tsa.api import VAR

# Data from Table 22.7 (partial sample provided, extend to 315 observations as needed)
data = {
    'Date': ['82 days 14:01:00', '82 days 14:02:00', '82 days 14:03:00', '82 days 14:04:00',
             '82 days 14:05:00', '82 days 14:06:00', '82 days 14:07:00', '82 days 14:08:00',
             # Add remaining dates up to 315 observations (March 2008)
             # For brevity, only a subset is shown here; full data should be sourced from the textbook site
             '83 days 09:04:00', '83 days 09:05:00', '83 days 09:06:00'],
    'TB3M': [12.92, 14.28, 13.31, 13.34, 12.71, 13.08, 11.86, 9.0,
             # Extend to 315
             3.97, 3.7, 3.57],
    'TB6M': [13.9, 14.81, 13.83, 13.87, 13.13, 13.76, 12.8, 10.51,
             # Extend to 315
             3.99, 3.74, 3.56]
}

# Create DataFrame
df = pd.DataFrame(data)
df['Date'] = pd.to_datetime(df['Date'], format='%j days %H:%M:%S', errors='coerce')
df.set_index('Date', inplace=True)

# a. Plot the two time series
plt.figure(figsize=(10, 6))
plt.plot(df['TB3M'], label='3-Month Treasury Bill Rate (TB3M)')
plt.plot(df['TB6M'], label='6-Month Treasury Bill Rate (TB6M)')
plt.title('3-Month and 6-Month Treasury Bill Rates (Jan 1982 - March 2008)')
plt.xlabel('Date')
plt.ylabel('Rate (%)')
plt.legend()
plt.grid(True)
plt.show()

# b. Unit root analysis (Augmented Dickey-Fuller Test)
def adf_test(series, title=''):
    result = adfuller(series.dropna(), autolag='AIC')
    print(f'ADF Test for {title}:')
    print(f'ADF Statistic: {result[0]}')
    print(f'p-value: {result[1]}')
    print(f'Critical Values: {result[4]}')
    print('Stationary' if result[1] < 0.05 else 'Non-Stationary')
    print()

adf_test(df['TB3M'], 'TB3M')
adf_test(df['TB6M'], 'TB6M')

# First differences for stationarity check
df['TB3M_diff'] = df['TB3M'].diff().dropna()
df['TB6M_diff'] = df['TB6M'].diff().dropna()
adf_test(df['TB3M_diff'], 'TB3M First Difference')
adf_test(df['TB6M_diff'], 'TB6M First Difference')

# c. Cointegration test
cointegration_test = coint(df['TB3M'].dropna(), df['TB6M'].dropna())
print('Cointegration Test:')
print(f'Test Statistic: {cointegration_test[0]}')
print(f'p-value: {cointegration_test[1]}')
print(f'Critical Values: {cointegration_test[2]}')
print('Cointegrated' if cointegration_test[1] < 0.05 else 'Not Cointegrated')

# e. VAR model (4 lags) - Example with levels and differences
# Using levels
model_levels = VAR(df[['TB3M', 'TB6M']].dropna())
results_levels = model_levels.fit(4)
print(results_levels.summary())

# Using first differences
model_diff = VAR(df[['TB3M_diff', 'TB6M_diff']].dropna())
results_diff = model_diff.fit(4)
print(results_diff.summary())
No description has been provided for this image
ADF Test for TB3M:
ADF Statistic: 2.5352999667961753
p-value: 0.9990603564911416
Critical Values: {'1%': -4.9386902332361515, '5%': -3.477582857142857, '10%': -2.8438679591836733}
Non-Stationary

ADF Test for TB6M:
ADF Statistic: 2.514491237847768
p-value: 0.9990543748408655
Critical Values: {'1%': -4.9386902332361515, '5%': -3.477582857142857, '10%': -2.8438679591836733}
Non-Stationary

ADF Test for TB3M First Difference:
ADF Statistic: 0.1719697403166656
p-value: 0.9706449085382642
Critical Values: {'1%': -5.354256481481482, '5%': -3.6462381481481483, '10%': -2.901197777777778}
Non-Stationary

ADF Test for TB6M First Difference:
ADF Statistic: -1.817481276011643
p-value: 0.37181791091805705
Critical Values: {'1%': -4.9386902332361515, '5%': -3.477582857142857, '10%': -2.8438679591836733}
Non-Stationary

Cointegration Test:
Test Statistic: -2.487511519775288
p-value: 0.2846044331657215
Critical Values: [-5.3269  -4.01537 -3.49577]
Not Cointegrated
  Summary of Regression Results   
==================================
Model:                         VAR
Method:                        OLS
Date:           Fri, 14, Mar, 2025
Time:                     18:50:41
--------------------------------------------------------------------
No. of Equations:         2.00000    BIC:                   -121.660
Nobs:                     7.00000    HQIC:                  -123.240
Log likelihood:           423.458    FPE:                6.26310e-54
AIC:                     -121.521    Det(Omega_mle):     1.19880e-54
--------------------------------------------------------------------
Results for equation TB3M
==========================================================================
             coefficient       std. error           t-stat            prob
--------------------------------------------------------------------------
const          -4.988552         0.000000    -12879751.678           0.000
L1.TB3M         5.182263         0.000000    312903340.816           0.000
L1.TB6M        -5.023047         0.000000   -104114368.846           0.000
L2.TB3M         5.898194         0.000000     37624155.431           0.000
L2.TB6M        -4.710390         0.000000    -39510967.875           0.000
L3.TB3M        -0.970892              NAN              NAN             NAN
L3.TB6M         1.569952              NAN              NAN             NAN
L4.TB3M         1.597356         0.000000     56365530.566           0.000
L4.TB6M        -1.809204         0.000000    -30095828.671           0.000
==========================================================================

Results for equation TB6M
==========================================================================
             coefficient       std. error           t-stat            prob
--------------------------------------------------------------------------
const          -1.002243         0.000000    -10756420.891           0.000
L1.TB3M         5.276281         0.000000   1324281330.009           0.000
L1.TB6M        -3.996487         0.000000   -344336731.902           0.000
L2.TB3M         0.153701         0.000000      4075547.627           0.000
L2.TB6M        -0.104216         0.000000     -3633750.029           0.000
L3.TB3M         0.123280              NAN              NAN             NAN
L3.TB6M        -0.524243              NAN              NAN             NAN
L4.TB3M         0.588122         0.000000     86266246.938           0.000
L4.TB6M        -0.217372         0.000000    -15030856.435           0.000
==========================================================================

Correlation matrix of residuals
            TB3M      TB6M
TB3M    1.000000  0.819346
TB6M    0.819346  1.000000



  Summary of Regression Results   
==================================
Model:                         VAR
Method:                        OLS
Date:           Fri, 14, Mar, 2025
Time:                     18:50:41
--------------------------------------------------------------------
No. of Equations:         2.00000    BIC:                   -126.918
Nobs:                     6.00000    HQIC:                  -128.794
Log likelihood:           379.854    FPE:                8.78124e-57
AIC:                     -126.294    Det(Omega_mle):     1.40500e-57
--------------------------------------------------------------------
Results for equation TB3M_diff
===============================================================================
                  coefficient       std. error           t-stat            prob
-------------------------------------------------------------------------------
const               -0.048306              NAN              NAN             NAN
L1.TB3M_diff         3.850431              NAN              NAN             NAN
L1.TB6M_diff        -2.563821              NAN              NAN             NAN
L2.TB3M_diff        -0.977365              NAN              NAN             NAN
L2.TB6M_diff         0.186169              NAN              NAN             NAN
L3.TB3M_diff         0.682397              NAN              NAN             NAN
L3.TB6M_diff        -0.057755              NAN              NAN             NAN
L4.TB3M_diff         0.440437              NAN              NAN             NAN
L4.TB6M_diff         1.071488              NAN              NAN             NAN
===============================================================================

Results for equation TB6M_diff
===============================================================================
                  coefficient       std. error           t-stat            prob
-------------------------------------------------------------------------------
const                0.181504              NAN              NAN             NAN
L1.TB3M_diff         4.972046              NAN              NAN             NAN
L1.TB6M_diff        -3.449560              NAN              NAN             NAN
L2.TB3M_diff        -0.996592              NAN              NAN             NAN
L2.TB6M_diff         0.646208              NAN              NAN             NAN
L3.TB3M_diff         0.462100              NAN              NAN             NAN
L3.TB6M_diff        -0.670126              NAN              NAN             NAN
L4.TB3M_diff        -0.116280              NAN              NAN             NAN
L4.TB6M_diff         1.079027              NAN              NAN             NAN
===============================================================================

Correlation matrix of residuals
             TB3M_diff  TB6M_diff
TB3M_diff     1.000000   0.853511
TB6M_diff     0.853511   1.000000



/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/vector_ar/var_model.py:1558: RuntimeWarning: invalid value encountered in sqrt
  stderr = np.sqrt(np.diag(self.cov_params()))
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/base/tsa_model.py:473: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.
  self._init_dates(dates, freq)
/usr/local/lib/python3.11/dist-packages/statsmodels/tsa/vector_ar/var_model.py:1558: RuntimeWarning: invalid value encountered in sqrt
  stderr = np.sqrt(np.diag(self.cov_params()))
In [ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, coint
from statsmodels.tsa.api import VAR

# Create the date range (Jan 1982 to March 2008, 314 months)
dates = pd.date_range(start='1982-01', end='2008-03', freq='ME')

# Simulate data: Linearly interpolate between starting and ending points
n_periods = len(dates)  # 314 periods
tb3m_start, tb3m_end = 12.92, 3.57
tb6m_start, tb6m_end = 13.9, 3.56

# Linear interpolation
tb3m = np.linspace(tb3m_start, tb3m_end, n_periods)
tb6m = np.linspace(tb6m_start, tb6m_end, n_periods)

# Create DataFrame
data = pd.DataFrame({
    'TB3M': tb3m,
    'TB6M': tb6m
}, index=dates)

# Plot
plt.figure(figsize=(10, 6))
plt.plot(data['TB3M'], label='TB3M')
plt.plot(data['TB6M'], label='TB6M')
plt.title('3-Month and 6-Month Treasury Bill Rates (Jan 1982 - March 2008)')
plt.xlabel('Date')
plt.ylabel('Rate (%)')
plt.legend()
plt.grid(True)
plt.show()

# ADF Test with more lags
def adf_test(series, title='', max_lags=12):
    series = series.dropna()  # Ensure no NaN values
    if len(series) < 3:  # Minimum length for ADF test
        print(f"Error: Insufficient data for {title} after differencing. Length: {len(series)}")
        return
    if np.any(np.isinf(series)) or np.any(np.isnan(series)):
        print(f"Error: {title} contains inf or nan values.")
        return
    result = adfuller(series, autolag='AIC', maxlag=max_lags)
    print(f'ADF Test for {title}:')
    print(f'ADF Statistic: {result[0]}')
    print(f'p-value: {result[1]}')
    print(f'Critical Values: {result[4]}')
    print('Stationary' if result[1] < 0.05 else 'Non-Stationary')
    print()

# Run ADF tests
adf_test(data['TB3M'], 'TB3M')
adf_test(data['TB6M'], 'TB6M')

# Compute first differences and test
data['TB3M_diff'] = data['TB3M'].diff().dropna()
data['TB6M_diff'] = data['TB6M'].diff().dropna()

print("Length of TB3M_diff:", len(data['TB3M_diff']))  # Diagnostic
print("Length of TB6M_diff:", len(data['TB6M_diff']))  # Diagnostic
print("NaN check TB3M_diff:", data['TB3M_diff'].isna().sum())
print("Inf check TB3M_diff:", np.isinf(data['TB3M_diff']).sum())

adf_test(data['TB3M_diff'], 'TB3M First Difference')
adf_test(data['TB6M_diff'], 'TB6M First Difference')

# Cointegration (optional, requires full series)
coint_result = coint(data['TB3M'], data['TB6M'])
print('Cointegration Test:')
print(f'Test Statistic: {coint_result[0]}')
print(f'p-value: {coint_result[1]}')
print(f'Critical Values: {coint_result[2]}')
print('Cointegrated' if coint_result[1] < 0.05 else 'Not Cointegrated')

# VAR (after verifying stationarity)
if data['TB3M_diff'].notna().sum() > 10:
    model = VAR(data[['TB3M_diff', 'TB6M_diff']].dropna())
    results = model.fit(maxlags=4, ic='aic')
    print(results.summary())
No description has been provided for this image
ADF Test for TB3M:
ADF Statistic: 0.13281376604838552
p-value: 0.968231338306127
Critical Values: {'1%': -3.4519023023726696, '5%': -2.8710320399170537, '10%': -2.57182745012602}
Non-Stationary

ADF Test for TB6M:
ADF Statistic: 0.1601512844061189
p-value: 0.9699359234163202
Critical Values: {'1%': -3.4514162625887037, '5%': -2.8708187088091406, '10%': -2.5717136883095675}
Non-Stationary

Length of TB3M_diff: 314
Length of TB6M_diff: 314
NaN check TB3M_diff: 1
Inf check TB3M_diff: 0
ADF Test for TB3M First Difference:
ADF Statistic: -20.902287148487336
p-value: 0.0
Critical Values: {'1%': -3.451552879535732, '5%': -2.8708786756338407, '10%': -2.571745666091128}
Stationary

ADF Test for TB6M First Difference:
ADF Statistic: -13.219565830679286
p-value: 1.0081843819354319e-24
Critical Values: {'1%': -3.451552879535732, '5%': -2.8708786756338407, '10%': -2.571745666091128}
Stationary

Cointegration Test:
Test Statistic: -inf
p-value: 0.0
Critical Values: [-3.93177232 -3.35572073 -3.05802792]
Cointegrated
  Summary of Regression Results   
==================================
Model:                         VAR
Method:                        OLS
Date:           Fri, 14, Mar, 2025
Time:                     18:52:40
--------------------------------------------------------------------
No. of Equations:         2.00000    BIC:                   -139.957
Nobs:                     313.000    HQIC:                  -139.971
Log likelihood:           21020.7    FPE:                1.61140e-61
AIC:                     -139.981    Det(Omega_mle):     1.60116e-61
--------------------------------------------------------------------
Results for equation TB3M_diff
=============================================================================
           coefficient       std. error                t-stat            prob
-----------------------------------------------------------------------------
const        -0.029872         0.000000  -771583310363948.125           0.000
=============================================================================

Results for equation TB6M_diff
=============================================================================
           coefficient       std. error                t-stat            prob
-----------------------------------------------------------------------------
const        -0.033035         0.000000  -998085376452980.625           0.000
=============================================================================

Correlation matrix of residuals
             TB3M_diff  TB6M_diff
TB3M_diff     1.000000   0.068255
TB6M_diff     0.068255   1.000000



<ipython-input-22-2f572cb68f82>:71: CollinearityWarning: y0 and y1 are (almost) perfectly colinear.Cointegration test is not reliable in this case.
  coint_result = coint(data['TB3M'], data['TB6M'])
In [ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller, coint
from statsmodels.tsa.api import VAR

# Load actual data (replace with your file path)
# Example: data = pd.read_csv('table22_7.csv')
# For now, use partial data and assume full dataset is similar
dates = pd.date_range(start='1982-01', end='2008-03', freq='ME')
# Placeholder for actual data (replace with real values from Table 22.7)
data = pd.DataFrame({
    'TB3M': [12.92, 14.28, 13.31, 13.34, 12.71, 13.08, 11.86, 9.0, 3.97, 3.7, 3.57] + [3.57] * 303,  # Extend to 314
    'TB6M': [13.9, 14.81, 13.83, 13.87, 13.13, 13.76, 12.8, 10.51, 3.99, 3.74, 3.56] + [3.56] * 303
}, index=dates)

# Plot
plt.figure(figsize=(10, 6))
plt.plot(data['TB3M'], label='TB3M')
plt.plot(data['TB6M'], label='TB6M')
plt.title('3-Month and 6-Month Treasury Bill Rates (Jan 1982 - March 2008)')
plt.xlabel('Date')
plt.ylabel('Rate (%)')
plt.legend()
plt.grid(True)
plt.show()

# ADF Test with more lags
def adf_test(series, title='', max_lags=12):
    series = series.dropna()
    if len(series) < 3:
        print(f"Error: Insufficient data for {title} after differencing. Length: {len(series)}")
        return
    if np.any(np.isinf(series)) or np.any(np.isnan(series)):
        print(f"Error: {title} contains inf or nan values.")
        return
    result = adfuller(series, autolag='AIC', maxlag=max_lags)
    print(f'ADF Test for {title}:')
    print(f'ADF Statistic: {result[0]}')
    print(f'p-value: {result[1]}')
    print(f'Critical Values: {result[4]}')
    print('Stationary' if result[1] < 0.05 else 'Non-Stationary')
    print()

adf_test(data['TB3M'], 'TB3M')
adf_test(data['TB6M'], 'TB6M')

# Compute first differences and drop NaNs from the entire DataFrame
data['TB3M_diff'] = data['TB3M'].diff()
data['TB6M_diff'] = data['TB6M'].diff()
data = data.dropna()  # Drop NaNs from the entire DataFrame

print("Length of data after differencing:", len(data))
adf_test(data['TB3M_diff'], 'TB3M First Difference')
adf_test(data['TB6M_diff'], 'TB6M First Difference')

# Cointegration
coint_result = coint(data['TB3M'], data['TB6M'])
print('Cointegration Test:')
print(f'Test Statistic: {coint_result[0]}')
print(f'p-value: {coint_result[1]}')
print(f'Critical Values: {coint_result[2]}')
print('Cointegrated' if coint_result[1] < 0.05 else 'Not Cointegrated')

# VAR (after verifying stationarity)
if len(data) > 10:
    model = VAR(data[['TB3M_diff', 'TB6M_diff']])
    results = model.fit(maxlags=4, ic='aic')
    print(results.summary())
No description has been provided for this image
ADF Test for TB3M:
ADF Statistic: -7.390830669457961
p-value: 8.009271499113918e-11
Critical Values: {'1%': -3.4514162625887037, '5%': -2.8708187088091406, '10%': -2.5717136883095675}
Stationary

ADF Test for TB6M:
ADF Statistic: -7.464568146490479
p-value: 5.245695450441187e-11
Critical Values: {'1%': -3.4514162625887037, '5%': -2.8708187088091406, '10%': -2.5717136883095675}
Stationary

Length of data after differencing: 313
ADF Test for TB3M First Difference:
ADF Statistic: -11.296616184796992
p-value: 1.3388235131287017e-20
Critical Values: {'1%': -3.4514843502727306, '5%': -2.8708485956333556, '10%': -2.571729625657462}
Stationary

ADF Test for TB6M First Difference:
ADF Statistic: -12.753912598303325
p-value: 8.419246601844197e-24
Critical Values: {'1%': -3.4514843502727306, '5%': -2.8708485956333556, '10%': -2.571729625657462}
Stationary

Cointegration Test:
Test Statistic: -12.97497513991583
p-value: 3.153025521877972e-23
Critical Values: [-3.93188666 -3.35578375 -3.05807153]
Cointegrated
  Summary of Regression Results   
==================================
Model:                         VAR
Method:                        OLS
Date:           Fri, 14, Mar, 2025
Time:                     18:55:20
--------------------------------------------------------------------
No. of Equations:         2.00000    BIC:                   -24.3409
Nobs:                     309.000    HQIC:                  -24.4714
Log likelihood:           2935.37    FPE:                2.15994e-11
AIC:                     -24.5584    Det(Omega_mle):     2.03941e-11
--------------------------------------------------------------------
Results for equation TB3M_diff
===============================================================================
                  coefficient       std. error           t-stat            prob
-------------------------------------------------------------------------------
const               -0.000082         0.000086           -0.956           0.339
L1.TB3M_diff         4.157031         0.001674         2483.830           0.000
L1.TB6M_diff        -4.797386         0.002505        -1914.937           0.000
L2.TB3M_diff         8.924165         0.016546          539.353           0.000
L2.TB6M_diff        -8.496877         0.014888         -570.739           0.000
L3.TB3M_diff         9.624104         0.016524          582.415           0.000
L3.TB6M_diff        -7.735916         0.013158         -587.912           0.000
L4.TB3M_diff         1.984913         0.003762          527.554           0.000
L4.TB6M_diff        -1.576490         0.002973         -530.295           0.000
===============================================================================

Results for equation TB6M_diff
===============================================================================
                  coefficient       std. error           t-stat            prob
-------------------------------------------------------------------------------
const                0.000186         0.000175            1.063           0.288
L1.TB3M_diff         5.600561         0.003430         1632.892           0.000
L1.TB6M_diff        -4.140504         0.005134         -806.474           0.000
L2.TB3M_diff        -0.438867         0.033908          -12.943           0.000
L2.TB6M_diff         0.767947         0.030509           25.171           0.000
L3.TB3M_diff        -2.861810         0.033864          -84.508           0.000
L3.TB6M_diff         2.104379         0.026966           78.039           0.000
L4.TB3M_diff         0.510904         0.007711           66.260           0.000
L4.TB6M_diff        -0.368934         0.006092          -60.557           0.000
===============================================================================

Correlation matrix of residuals
             TB3M_diff  TB6M_diff
TB3M_diff     1.000000   0.021763
TB6M_diff     0.021763   1.000000



/usr/local/lib/python3.11/dist-packages/statsmodels/regression/linear_model.py:955: RuntimeWarning: divide by zero encountered in log
  llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
/usr/local/lib/python3.11/dist-packages/statsmodels/regression/linear_model.py:955: RuntimeWarning: divide by zero encountered in log
  llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
/usr/local/lib/python3.11/dist-packages/statsmodels/regression/linear_model.py:955: RuntimeWarning: divide by zero encountered in log
  llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
/usr/local/lib/python3.11/dist-packages/statsmodels/regression/linear_model.py:955: RuntimeWarning: divide by zero encountered in log
  llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
/usr/local/lib/python3.11/dist-packages/statsmodels/regression/linear_model.py:955: RuntimeWarning: divide by zero encountered in log
  llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
In [ ]:
from google.colab import drive
import os

# 1. Montar Google Drive
drive.mount('/content/drive')

# 2. Definir la ruta del archivo
input_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 2.ipynb"
output_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 2.html"

# 3. Convertir a HTML usando nbconvert
!jupyter nbconvert --to html "$input_path" --output "$output_path"

print(f"Archivo convertido y guardado en: {output_path}")
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
[NbConvertApp] Converting notebook /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 2.ipynb to html
[NbConvertApp] WARNING | Alternative text is missing on 3 image(s).
[NbConvertApp] Writing 501234 bytes to /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 2.html
Archivo convertido y guardado en: /content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/examen 2.html
In [ ]:
from google.colab import drive
import os

# 1. Montar Google Drive
drive.mount('/content/drive')

# 2. Definir la ruta del archivo
input_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/combined_notebook.ipynb"
output_path = "/content/drive/My Drive/6to semestre: feb-jun 2025/series de tiempo/combined_notebook.html"

# 3. Convertir a HTML usando nbconvert
!jupyter nbconvert --to html "$input_path" --output "$output_path"

print(f"Archivo convertido y guardado en: {output_path}")